1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/CaptureTracking.h"
19 #include "llvm/Analysis/LazyValueInfo.h"
20 #include "llvm/Analysis/MemoryBuiltins.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/IR/IRBuilder.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/NoFolder.h"
25 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
26 #include "llvm/Transforms/Utils/Local.h"
27 
28 #include <cassert>
29 
30 using namespace llvm;
31 
32 #define DEBUG_TYPE "attributor"
33 
34 static cl::opt<bool> ManifestInternal(
35     "attributor-manifest-internal", cl::Hidden,
36     cl::desc("Manifest Attributor internal string attributes."),
37     cl::init(false));
38 
39 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
40                                        cl::Hidden);
41 
42 // Some helper macros to deal with statistics tracking.
43 //
44 // Usage:
45 // For simple IR attribute tracking overload trackStatistics in the abstract
46 // attribute and choose the right STATS_DECLTRACK_********* macro,
47 // e.g.,:
48 //  void trackStatistics() const override {
49 //    STATS_DECLTRACK_ARG_ATTR(returned)
50 //  }
51 // If there is a single "increment" side one can use the macro
52 // STATS_DECLTRACK with a custom message. If there are multiple increment
53 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
54 //
55 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
56   ("Number of " #TYPE " marked '" #NAME "'")
57 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
58 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
59 #define STATS_DECL(NAME, TYPE, MSG)                                            \
60   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
61 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
62 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
63   {                                                                            \
64     STATS_DECL(NAME, TYPE, MSG)                                                \
65     STATS_TRACK(NAME, TYPE)                                                    \
66   }
67 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
68   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
69 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
70   STATS_DECLTRACK(NAME, CSArguments,                                           \
71                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
72 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
73   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
74 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
75   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
76 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
77   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
78                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
79 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
80   STATS_DECLTRACK(NAME, CSReturn,                                              \
81                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
82 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
83   STATS_DECLTRACK(NAME, Floating,                                              \
84                   ("Number of floating values known to be '" #NAME "'"))
85 
86 // Specialization of the operator<< for abstract attributes subclasses. This
87 // disambiguates situations where multiple operators are applicable.
88 namespace llvm {
89 #define PIPE_OPERATOR(CLASS)                                                   \
90   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
91     return OS << static_cast<const AbstractAttribute &>(AA);                   \
92   }
93 
94 PIPE_OPERATOR(AAIsDead)
95 PIPE_OPERATOR(AANoUnwind)
96 PIPE_OPERATOR(AANoSync)
97 PIPE_OPERATOR(AANoRecurse)
98 PIPE_OPERATOR(AAWillReturn)
99 PIPE_OPERATOR(AANoReturn)
100 PIPE_OPERATOR(AAReturnedValues)
101 PIPE_OPERATOR(AANonNull)
102 PIPE_OPERATOR(AANoAlias)
103 PIPE_OPERATOR(AADereferenceable)
104 PIPE_OPERATOR(AAAlign)
105 PIPE_OPERATOR(AANoCapture)
106 PIPE_OPERATOR(AAValueSimplify)
107 PIPE_OPERATOR(AANoFree)
108 PIPE_OPERATOR(AAHeapToStack)
109 PIPE_OPERATOR(AAReachability)
110 PIPE_OPERATOR(AAMemoryBehavior)
111 PIPE_OPERATOR(AAMemoryLocation)
112 PIPE_OPERATOR(AAValueConstantRange)
113 PIPE_OPERATOR(AAPrivatizablePtr)
114 
115 #undef PIPE_OPERATOR
116 } // namespace llvm
117 
118 namespace {
119 
120 static Optional<ConstantInt *>
121 getAssumedConstantInt(Attributor &A, const Value &V,
122                       const AbstractAttribute &AA,
123                       bool &UsedAssumedInformation) {
124   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
125   if (C.hasValue())
126     return dyn_cast_or_null<ConstantInt>(C.getValue());
127   return llvm::None;
128 }
129 
130 /// Get pointer operand of memory accessing instruction. If \p I is
131 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
132 /// is set to false and the instruction is volatile, return nullptr.
133 static const Value *getPointerOperand(const Instruction *I,
134                                       bool AllowVolatile) {
135   if (auto *LI = dyn_cast<LoadInst>(I)) {
136     if (!AllowVolatile && LI->isVolatile())
137       return nullptr;
138     return LI->getPointerOperand();
139   }
140 
141   if (auto *SI = dyn_cast<StoreInst>(I)) {
142     if (!AllowVolatile && SI->isVolatile())
143       return nullptr;
144     return SI->getPointerOperand();
145   }
146 
147   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
148     if (!AllowVolatile && CXI->isVolatile())
149       return nullptr;
150     return CXI->getPointerOperand();
151   }
152 
153   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
154     if (!AllowVolatile && RMWI->isVolatile())
155       return nullptr;
156     return RMWI->getPointerOperand();
157   }
158 
159   return nullptr;
160 }
161 
162 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
163 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
164 /// getelement pointer instructions that traverse the natural type of \p Ptr if
165 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
166 /// through a cast to i8*.
167 ///
168 /// TODO: This could probably live somewhere more prominantly if it doesn't
169 ///       already exist.
170 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
171                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
172   assert(Offset >= 0 && "Negative offset not supported yet!");
173   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
174                     << "-bytes as " << *ResTy << "\n");
175 
176   // The initial type we are trying to traverse to get nice GEPs.
177   Type *Ty = Ptr->getType();
178 
179   SmallVector<Value *, 4> Indices;
180   std::string GEPName = Ptr->getName().str();
181   while (Offset) {
182     uint64_t Idx, Rem;
183 
184     if (auto *STy = dyn_cast<StructType>(Ty)) {
185       const StructLayout *SL = DL.getStructLayout(STy);
186       if (int64_t(SL->getSizeInBytes()) < Offset)
187         break;
188       Idx = SL->getElementContainingOffset(Offset);
189       assert(Idx < STy->getNumElements() && "Offset calculation error!");
190       Rem = Offset - SL->getElementOffset(Idx);
191       Ty = STy->getElementType(Idx);
192     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
193       Ty = PTy->getElementType();
194       if (!Ty->isSized())
195         break;
196       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
197       assert(ElementSize && "Expected type with size!");
198       Idx = Offset / ElementSize;
199       Rem = Offset % ElementSize;
200     } else {
201       // Non-aggregate type, we cast and make byte-wise progress now.
202       break;
203     }
204 
205     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
206                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
207 
208     GEPName += "." + std::to_string(Idx);
209     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
210     Offset = Rem;
211   }
212 
213   // Create a GEP if we collected indices above.
214   if (Indices.size())
215     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
216 
217   // If an offset is left we use byte-wise adjustment.
218   if (Offset) {
219     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
220     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
221                         GEPName + ".b" + Twine(Offset));
222   }
223 
224   // Ensure the result has the requested type.
225   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
226 
227   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
228   return Ptr;
229 }
230 
231 /// Recursively visit all values that might become \p IRP at some point. This
232 /// will be done by looking through cast instructions, selects, phis, and calls
233 /// with the "returned" attribute. Once we cannot look through the value any
234 /// further, the callback \p VisitValueCB is invoked and passed the current
235 /// value, the \p State, and a flag to indicate if we stripped anything.
236 /// Stripped means that we unpacked the value associated with \p IRP at least
237 /// once. Note that the value used for the callback may still be the value
238 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
239 /// we will never visit more values than specified by \p MaxValues.
240 template <typename AAType, typename StateTy>
241 static bool genericValueTraversal(
242     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
243     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
244         VisitValueCB,
245     const Instruction *CtxI, int MaxValues = 16,
246     function_ref<Value *(Value *)> StripCB = nullptr) {
247 
248   const AAIsDead *LivenessAA = nullptr;
249   if (IRP.getAnchorScope())
250     LivenessAA = &A.getAAFor<AAIsDead>(
251         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
252         /* TrackDependence */ false);
253   bool AnyDead = false;
254 
255   using Item = std::pair<Value *, const Instruction *>;
256   SmallSet<Item, 16> Visited;
257   SmallVector<Item, 16> Worklist;
258   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
259 
260   int Iteration = 0;
261   do {
262     Item I = Worklist.pop_back_val();
263     Value *V = I.first;
264     CtxI = I.second;
265     if (StripCB)
266       V = StripCB(V);
267 
268     // Check if we should process the current value. To prevent endless
269     // recursion keep a record of the values we followed!
270     if (!Visited.insert(I).second)
271       continue;
272 
273     // Make sure we limit the compile time for complex expressions.
274     if (Iteration++ >= MaxValues)
275       return false;
276 
277     // Explicitly look through calls with a "returned" attribute if we do
278     // not have a pointer as stripPointerCasts only works on them.
279     Value *NewV = nullptr;
280     if (V->getType()->isPointerTy()) {
281       NewV = V->stripPointerCasts();
282     } else {
283       CallSite CS(V);
284       if (CS && CS.getCalledFunction()) {
285         for (Argument &Arg : CS.getCalledFunction()->args())
286           if (Arg.hasReturnedAttr()) {
287             NewV = CS.getArgOperand(Arg.getArgNo());
288             break;
289           }
290       }
291     }
292     if (NewV && NewV != V) {
293       Worklist.push_back({NewV, CtxI});
294       continue;
295     }
296 
297     // Look through select instructions, visit both potential values.
298     if (auto *SI = dyn_cast<SelectInst>(V)) {
299       Worklist.push_back({SI->getTrueValue(), CtxI});
300       Worklist.push_back({SI->getFalseValue(), CtxI});
301       continue;
302     }
303 
304     // Look through phi nodes, visit all live operands.
305     if (auto *PHI = dyn_cast<PHINode>(V)) {
306       assert(LivenessAA &&
307              "Expected liveness in the presence of instructions!");
308       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
309         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
310         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
311                             LivenessAA,
312                             /* CheckBBLivenessOnly */ true)) {
313           AnyDead = true;
314           continue;
315         }
316         Worklist.push_back(
317             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
318       }
319       continue;
320     }
321 
322     // Once a leaf is reached we inform the user through the callback.
323     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
324       return false;
325   } while (!Worklist.empty());
326 
327   // If we actually used liveness information so we have to record a dependence.
328   if (AnyDead)
329     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
330 
331   // All values have been visited.
332   return true;
333 }
334 
335 static const Value *
336 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
337                                      const DataLayout &DL,
338                                      bool AllowNonInbounds = false) {
339   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
340   if (!Ptr)
341     return nullptr;
342 
343   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
344                                           AllowNonInbounds);
345 }
346 
347 /// Helper function to clamp a state \p S of type \p StateType with the
348 /// information in \p R and indicate/return if \p S did change (as-in update is
349 /// required to be run again).
350 template <typename StateType>
351 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
352   auto Assumed = S.getAssumed();
353   S ^= R;
354   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
355                                    : ChangeStatus::CHANGED;
356 }
357 
358 /// Clamp the information known for all returned values of a function
359 /// (identified by \p QueryingAA) into \p S.
360 template <typename AAType, typename StateType = typename AAType::StateType>
361 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
362                                      StateType &S) {
363   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
364                     << QueryingAA << " into " << S << "\n");
365 
366   assert((QueryingAA.getIRPosition().getPositionKind() ==
367               IRPosition::IRP_RETURNED ||
368           QueryingAA.getIRPosition().getPositionKind() ==
369               IRPosition::IRP_CALL_SITE_RETURNED) &&
370          "Can only clamp returned value states for a function returned or call "
371          "site returned position!");
372 
373   // Use an optional state as there might not be any return values and we want
374   // to join (IntegerState::operator&) the state of all there are.
375   Optional<StateType> T;
376 
377   // Callback for each possibly returned value.
378   auto CheckReturnValue = [&](Value &RV) -> bool {
379     const IRPosition &RVPos = IRPosition::value(RV);
380     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
381     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
382                       << " @ " << RVPos << "\n");
383     const StateType &AAS = static_cast<const StateType &>(AA.getState());
384     if (T.hasValue())
385       *T &= AAS;
386     else
387       T = AAS;
388     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
389                       << "\n");
390     return T->isValidState();
391   };
392 
393   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
394     S.indicatePessimisticFixpoint();
395   else if (T.hasValue())
396     S ^= *T;
397 }
398 
399 /// Helper class to compose two generic deduction
400 template <typename AAType, typename Base, typename StateType,
401           template <typename...> class F, template <typename...> class G>
402 struct AAComposeTwoGenericDeduction
403     : public F<AAType, G<AAType, Base, StateType>, StateType> {
404   AAComposeTwoGenericDeduction(const IRPosition &IRP)
405       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP) {}
406 
407   void initialize(Attributor &A) override {
408     F<AAType, G<AAType, Base, StateType>, StateType>::initialize(A);
409     G<AAType, Base, StateType>::initialize(A);
410   }
411 
412   /// See AbstractAttribute::updateImpl(...).
413   ChangeStatus updateImpl(Attributor &A) override {
414     ChangeStatus ChangedF =
415         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
416     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
417     return ChangedF | ChangedG;
418   }
419 };
420 
421 /// Helper class for generic deduction: return value -> returned position.
422 template <typename AAType, typename Base,
423           typename StateType = typename Base::StateType>
424 struct AAReturnedFromReturnedValues : public Base {
425   AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}
426 
427   /// See AbstractAttribute::updateImpl(...).
428   ChangeStatus updateImpl(Attributor &A) override {
429     StateType S(StateType::getBestState(this->getState()));
430     clampReturnedValueStates<AAType, StateType>(A, *this, S);
431     // TODO: If we know we visited all returned values, thus no are assumed
432     // dead, we can take the known information from the state T.
433     return clampStateAndIndicateChange<StateType>(this->getState(), S);
434   }
435 };
436 
437 /// Clamp the information known at all call sites for a given argument
438 /// (identified by \p QueryingAA) into \p S.
439 template <typename AAType, typename StateType = typename AAType::StateType>
440 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
441                                         StateType &S) {
442   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
443                     << QueryingAA << " into " << S << "\n");
444 
445   assert(QueryingAA.getIRPosition().getPositionKind() ==
446              IRPosition::IRP_ARGUMENT &&
447          "Can only clamp call site argument states for an argument position!");
448 
449   // Use an optional state as there might not be any return values and we want
450   // to join (IntegerState::operator&) the state of all there are.
451   Optional<StateType> T;
452 
453   // The argument number which is also the call site argument number.
454   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
455 
456   auto CallSiteCheck = [&](AbstractCallSite ACS) {
457     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
458     // Check if a coresponding argument was found or if it is on not associated
459     // (which can happen for callback calls).
460     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
461       return false;
462 
463     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
464     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
465                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
466     const StateType &AAS = static_cast<const StateType &>(AA.getState());
467     if (T.hasValue())
468       *T &= AAS;
469     else
470       T = AAS;
471     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
472                       << "\n");
473     return T->isValidState();
474   };
475 
476   bool AllCallSitesKnown;
477   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
478                               AllCallSitesKnown))
479     S.indicatePessimisticFixpoint();
480   else if (T.hasValue())
481     S ^= *T;
482 }
483 
484 /// Helper class for generic deduction: call site argument -> argument position.
485 template <typename AAType, typename Base,
486           typename StateType = typename AAType::StateType>
487 struct AAArgumentFromCallSiteArguments : public Base {
488   AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}
489 
490   /// See AbstractAttribute::updateImpl(...).
491   ChangeStatus updateImpl(Attributor &A) override {
492     StateType S(StateType::getBestState(this->getState()));
493     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
494     // TODO: If we know we visited all incoming values, thus no are assumed
495     // dead, we can take the known information from the state T.
496     return clampStateAndIndicateChange<StateType>(this->getState(), S);
497   }
498 };
499 
500 /// Helper class for generic replication: function returned -> cs returned.
501 template <typename AAType, typename Base,
502           typename StateType = typename Base::StateType>
503 struct AACallSiteReturnedFromReturned : public Base {
504   AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}
505 
506   /// See AbstractAttribute::updateImpl(...).
507   ChangeStatus updateImpl(Attributor &A) override {
508     assert(this->getIRPosition().getPositionKind() ==
509                IRPosition::IRP_CALL_SITE_RETURNED &&
510            "Can only wrap function returned positions for call site returned "
511            "positions!");
512     auto &S = this->getState();
513 
514     const Function *AssociatedFunction =
515         this->getIRPosition().getAssociatedFunction();
516     if (!AssociatedFunction)
517       return S.indicatePessimisticFixpoint();
518 
519     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
520     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
521     return clampStateAndIndicateChange(
522         S, static_cast<const StateType &>(AA.getState()));
523   }
524 };
525 
526 /// Helper class for generic deduction using must-be-executed-context
527 /// Base class is required to have `followUse` method.
528 
529 /// bool followUse(Attributor &A, const Use *U, const Instruction *I)
530 /// U - Underlying use.
531 /// I - The user of the \p U.
532 /// `followUse` returns true if the value should be tracked transitively.
533 
534 template <typename AAType, typename Base,
535           typename StateType = typename AAType::StateType>
536 struct AAFromMustBeExecutedContext : public Base {
537   AAFromMustBeExecutedContext(const IRPosition &IRP) : Base(IRP) {}
538 
539   void initialize(Attributor &A) override {
540     Base::initialize(A);
541     const IRPosition &IRP = this->getIRPosition();
542     Instruction *CtxI = IRP.getCtxI();
543 
544     if (!CtxI)
545       return;
546 
547     for (const Use &U : IRP.getAssociatedValue().uses())
548       Uses.insert(&U);
549   }
550 
551   /// Helper function to accumulate uses.
552   void followUsesInContext(Attributor &A,
553                            MustBeExecutedContextExplorer &Explorer,
554                            const Instruction *CtxI,
555                            SetVector<const Use *> &Uses, StateType &State) {
556     auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
557     for (unsigned u = 0; u < Uses.size(); ++u) {
558       const Use *U = Uses[u];
559       if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
560         bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
561         if (Found && Base::followUse(A, U, UserI, State))
562           for (const Use &Us : UserI->uses())
563             Uses.insert(&Us);
564       }
565     }
566   }
567 
568   /// See AbstractAttribute::updateImpl(...).
569   ChangeStatus updateImpl(Attributor &A) override {
570     auto BeforeState = this->getState();
571     auto &S = this->getState();
572     Instruction *CtxI = this->getIRPosition().getCtxI();
573     if (!CtxI)
574       return ChangeStatus::UNCHANGED;
575 
576     MustBeExecutedContextExplorer &Explorer =
577         A.getInfoCache().getMustBeExecutedContextExplorer();
578 
579     followUsesInContext(A, Explorer, CtxI, Uses, S);
580 
581     if (this->isAtFixpoint())
582       return ChangeStatus::CHANGED;
583 
584     SmallVector<const BranchInst *, 4> BrInsts;
585     auto Pred = [&](const Instruction *I) {
586       if (const BranchInst *Br = dyn_cast<BranchInst>(I))
587         if (Br->isConditional())
588           BrInsts.push_back(Br);
589       return true;
590     };
591 
592     // Here, accumulate conditional branch instructions in the context. We
593     // explore the child paths and collect the known states. The disjunction of
594     // those states can be merged to its own state. Let ParentState_i be a state
595     // to indicate the known information for an i-th branch instruction in the
596     // context. ChildStates are created for its successors respectively.
597     //
598     // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
599     // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
600     //      ...
601     // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
602     //
603     // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
604     //
605     // FIXME: Currently, recursive branches are not handled. For example, we
606     // can't deduce that ptr must be dereferenced in below function.
607     //
608     // void f(int a, int c, int *ptr) {
609     //    if(a)
610     //      if (b) {
611     //        *ptr = 0;
612     //      } else {
613     //        *ptr = 1;
614     //      }
615     //    else {
616     //      if (b) {
617     //        *ptr = 0;
618     //      } else {
619     //        *ptr = 1;
620     //      }
621     //    }
622     // }
623 
624     Explorer.checkForAllContext(CtxI, Pred);
625     for (const BranchInst *Br : BrInsts) {
626       StateType ParentState;
627 
628       // The known state of the parent state is a conjunction of children's
629       // known states so it is initialized with a best state.
630       ParentState.indicateOptimisticFixpoint();
631 
632       for (const BasicBlock *BB : Br->successors()) {
633         StateType ChildState;
634 
635         size_t BeforeSize = Uses.size();
636         followUsesInContext(A, Explorer, &BB->front(), Uses, ChildState);
637 
638         // Erase uses which only appear in the child.
639         for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
640           It = Uses.erase(It);
641 
642         ParentState &= ChildState;
643       }
644 
645       // Use only known state.
646       S += ParentState;
647     }
648 
649     return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
650   }
651 
652 private:
653   /// Container for (transitive) uses of the associated value.
654   SetVector<const Use *> Uses;
655 };
656 
657 template <typename AAType, typename Base,
658           typename StateType = typename AAType::StateType>
659 using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
660     AAComposeTwoGenericDeduction<AAType, Base, StateType,
661                                  AAFromMustBeExecutedContext,
662                                  AAArgumentFromCallSiteArguments>;
663 
664 template <typename AAType, typename Base,
665           typename StateType = typename AAType::StateType>
666 using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
667     AAComposeTwoGenericDeduction<AAType, Base, StateType,
668                                  AAFromMustBeExecutedContext,
669                                  AACallSiteReturnedFromReturned>;
670 
671 /// -----------------------NoUnwind Function Attribute--------------------------
672 
673 struct AANoUnwindImpl : AANoUnwind {
674   AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}
675 
676   const std::string getAsStr() const override {
677     return getAssumed() ? "nounwind" : "may-unwind";
678   }
679 
680   /// See AbstractAttribute::updateImpl(...).
681   ChangeStatus updateImpl(Attributor &A) override {
682     auto Opcodes = {
683         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
684         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
685         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
686 
687     auto CheckForNoUnwind = [&](Instruction &I) {
688       if (!I.mayThrow())
689         return true;
690 
691       if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
692         const auto &NoUnwindAA =
693             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
694         return NoUnwindAA.isAssumedNoUnwind();
695       }
696       return false;
697     };
698 
699     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
700       return indicatePessimisticFixpoint();
701 
702     return ChangeStatus::UNCHANGED;
703   }
704 };
705 
706 struct AANoUnwindFunction final : public AANoUnwindImpl {
707   AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
708 
709   /// See AbstractAttribute::trackStatistics()
710   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
711 };
712 
713 /// NoUnwind attribute deduction for a call sites.
714 struct AANoUnwindCallSite final : AANoUnwindImpl {
715   AANoUnwindCallSite(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}
716 
717   /// See AbstractAttribute::initialize(...).
718   void initialize(Attributor &A) override {
719     AANoUnwindImpl::initialize(A);
720     Function *F = getAssociatedFunction();
721     if (!F)
722       indicatePessimisticFixpoint();
723   }
724 
725   /// See AbstractAttribute::updateImpl(...).
726   ChangeStatus updateImpl(Attributor &A) override {
727     // TODO: Once we have call site specific value information we can provide
728     //       call site specific liveness information and then it makes
729     //       sense to specialize attributes for call sites arguments instead of
730     //       redirecting requests to the callee argument.
731     Function *F = getAssociatedFunction();
732     const IRPosition &FnPos = IRPosition::function(*F);
733     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
734     return clampStateAndIndicateChange(
735         getState(),
736         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
737   }
738 
739   /// See AbstractAttribute::trackStatistics()
740   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
741 };
742 
743 /// --------------------- Function Return Values -------------------------------
744 
745 /// "Attribute" that collects all potential returned values and the return
746 /// instructions that they arise from.
747 ///
748 /// If there is a unique returned value R, the manifest method will:
749 ///   - mark R with the "returned" attribute, if R is an argument.
750 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
751 
752   /// Mapping of values potentially returned by the associated function to the
753   /// return instructions that might return them.
754   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
755 
756   /// Mapping to remember the number of returned values for a call site such
757   /// that we can avoid updates if nothing changed.
758   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
759 
760   /// Set of unresolved calls returned by the associated function.
761   SmallSetVector<CallBase *, 4> UnresolvedCalls;
762 
763   /// State flags
764   ///
765   ///{
766   bool IsFixed = false;
767   bool IsValidState = true;
768   ///}
769 
770 public:
771   AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}
772 
773   /// See AbstractAttribute::initialize(...).
774   void initialize(Attributor &A) override {
775     // Reset the state.
776     IsFixed = false;
777     IsValidState = true;
778     ReturnedValues.clear();
779 
780     Function *F = getAssociatedFunction();
781     if (!F) {
782       indicatePessimisticFixpoint();
783       return;
784     }
785     assert(!F->getReturnType()->isVoidTy() &&
786            "Did not expect a void return type!");
787 
788     // The map from instruction opcodes to those instructions in the function.
789     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
790 
791     // Look through all arguments, if one is marked as returned we are done.
792     for (Argument &Arg : F->args()) {
793       if (Arg.hasReturnedAttr()) {
794         auto &ReturnInstSet = ReturnedValues[&Arg];
795         for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
796           ReturnInstSet.insert(cast<ReturnInst>(RI));
797 
798         indicateOptimisticFixpoint();
799         return;
800       }
801     }
802 
803     if (!A.isFunctionIPOAmendable(*F))
804       indicatePessimisticFixpoint();
805   }
806 
807   /// See AbstractAttribute::manifest(...).
808   ChangeStatus manifest(Attributor &A) override;
809 
810   /// See AbstractAttribute::getState(...).
811   AbstractState &getState() override { return *this; }
812 
813   /// See AbstractAttribute::getState(...).
814   const AbstractState &getState() const override { return *this; }
815 
816   /// See AbstractAttribute::updateImpl(Attributor &A).
817   ChangeStatus updateImpl(Attributor &A) override;
818 
819   llvm::iterator_range<iterator> returned_values() override {
820     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
821   }
822 
823   llvm::iterator_range<const_iterator> returned_values() const override {
824     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
825   }
826 
827   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
828     return UnresolvedCalls;
829   }
830 
831   /// Return the number of potential return values, -1 if unknown.
832   size_t getNumReturnValues() const override {
833     return isValidState() ? ReturnedValues.size() : -1;
834   }
835 
836   /// Return an assumed unique return value if a single candidate is found. If
837   /// there cannot be one, return a nullptr. If it is not clear yet, return the
838   /// Optional::NoneType.
839   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
840 
841   /// See AbstractState::checkForAllReturnedValues(...).
842   bool checkForAllReturnedValuesAndReturnInsts(
843       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
844       const override;
845 
846   /// Pretty print the attribute similar to the IR representation.
847   const std::string getAsStr() const override;
848 
849   /// See AbstractState::isAtFixpoint().
850   bool isAtFixpoint() const override { return IsFixed; }
851 
852   /// See AbstractState::isValidState().
853   bool isValidState() const override { return IsValidState; }
854 
855   /// See AbstractState::indicateOptimisticFixpoint(...).
856   ChangeStatus indicateOptimisticFixpoint() override {
857     IsFixed = true;
858     return ChangeStatus::UNCHANGED;
859   }
860 
861   ChangeStatus indicatePessimisticFixpoint() override {
862     IsFixed = true;
863     IsValidState = false;
864     return ChangeStatus::CHANGED;
865   }
866 };
867 
868 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
869   ChangeStatus Changed = ChangeStatus::UNCHANGED;
870 
871   // Bookkeeping.
872   assert(isValidState());
873   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
874                   "Number of function with known return values");
875 
876   // Check if we have an assumed unique return value that we could manifest.
877   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
878 
879   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
880     return Changed;
881 
882   // Bookkeeping.
883   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
884                   "Number of function with unique return");
885 
886   // Callback to replace the uses of CB with the constant C.
887   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
888     if (CB.getNumUses() == 0)
889       return ChangeStatus::UNCHANGED;
890     if (A.changeValueAfterManifest(CB, C))
891       return ChangeStatus::CHANGED;
892     return ChangeStatus::UNCHANGED;
893   };
894 
895   // If the assumed unique return value is an argument, annotate it.
896   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
897     // TODO: This should be handled differently!
898     this->AnchorVal = UniqueRVArg;
899     this->KindOrArgNo = UniqueRVArg->getArgNo();
900     Changed = IRAttribute::manifest(A);
901   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
902     // We can replace the returned value with the unique returned constant.
903     Value &AnchorValue = getAnchorValue();
904     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
905       for (const Use &U : F->uses())
906         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
907           if (CB->isCallee(&U)) {
908             Constant *RVCCast =
909                 CB->getType() == RVC->getType()
910                     ? RVC
911                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
912             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
913           }
914     } else {
915       assert(isa<CallBase>(AnchorValue) &&
916              "Expcected a function or call base anchor!");
917       Constant *RVCCast =
918           AnchorValue.getType() == RVC->getType()
919               ? RVC
920               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
921       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
922     }
923     if (Changed == ChangeStatus::CHANGED)
924       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
925                       "Number of function returns replaced by constant return");
926   }
927 
928   return Changed;
929 }
930 
931 const std::string AAReturnedValuesImpl::getAsStr() const {
932   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
933          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
934          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
935 }
936 
937 Optional<Value *>
938 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
939   // If checkForAllReturnedValues provides a unique value, ignoring potential
940   // undef values that can also be present, it is assumed to be the actual
941   // return value and forwarded to the caller of this method. If there are
942   // multiple, a nullptr is returned indicating there cannot be a unique
943   // returned value.
944   Optional<Value *> UniqueRV;
945 
946   auto Pred = [&](Value &RV) -> bool {
947     // If we found a second returned value and neither the current nor the saved
948     // one is an undef, there is no unique returned value. Undefs are special
949     // since we can pretend they have any value.
950     if (UniqueRV.hasValue() && UniqueRV != &RV &&
951         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
952       UniqueRV = nullptr;
953       return false;
954     }
955 
956     // Do not overwrite a value with an undef.
957     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
958       UniqueRV = &RV;
959 
960     return true;
961   };
962 
963   if (!A.checkForAllReturnedValues(Pred, *this))
964     UniqueRV = nullptr;
965 
966   return UniqueRV;
967 }
968 
969 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
970     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
971     const {
972   if (!isValidState())
973     return false;
974 
975   // Check all returned values but ignore call sites as long as we have not
976   // encountered an overdefined one during an update.
977   for (auto &It : ReturnedValues) {
978     Value *RV = It.first;
979 
980     CallBase *CB = dyn_cast<CallBase>(RV);
981     if (CB && !UnresolvedCalls.count(CB))
982       continue;
983 
984     if (!Pred(*RV, It.second))
985       return false;
986   }
987 
988   return true;
989 }
990 
991 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
992   size_t NumUnresolvedCalls = UnresolvedCalls.size();
993   bool Changed = false;
994 
995   // State used in the value traversals starting in returned values.
996   struct RVState {
997     // The map in which we collect return values -> return instrs.
998     decltype(ReturnedValues) &RetValsMap;
999     // The flag to indicate a change.
1000     bool &Changed;
1001     // The return instrs we come from.
1002     SmallSetVector<ReturnInst *, 4> RetInsts;
1003   };
1004 
1005   // Callback for a leaf value returned by the associated function.
1006   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1007                          bool) -> bool {
1008     auto Size = RVS.RetValsMap[&Val].size();
1009     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1010     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1011     RVS.Changed |= Inserted;
1012     LLVM_DEBUG({
1013       if (Inserted)
1014         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1015                << " => " << RVS.RetInsts.size() << "\n";
1016     });
1017     return true;
1018   };
1019 
1020   // Helper method to invoke the generic value traversal.
1021   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1022                                 const Instruction *CtxI) {
1023     IRPosition RetValPos = IRPosition::value(RV);
1024     return genericValueTraversal<AAReturnedValues, RVState>(
1025         A, RetValPos, *this, RVS, VisitValueCB, CtxI);
1026   };
1027 
1028   // Callback for all "return intructions" live in the associated function.
1029   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1030     ReturnInst &Ret = cast<ReturnInst>(I);
1031     RVState RVS({ReturnedValues, Changed, {}});
1032     RVS.RetInsts.insert(&Ret);
1033     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1034   };
1035 
1036   // Start by discovering returned values from all live returned instructions in
1037   // the associated function.
1038   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1039     return indicatePessimisticFixpoint();
1040 
1041   // Once returned values "directly" present in the code are handled we try to
1042   // resolve returned calls.
1043   decltype(ReturnedValues) NewRVsMap;
1044   for (auto &It : ReturnedValues) {
1045     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1046                       << " by #" << It.second.size() << " RIs\n");
1047     CallBase *CB = dyn_cast<CallBase>(It.first);
1048     if (!CB || UnresolvedCalls.count(CB))
1049       continue;
1050 
1051     if (!CB->getCalledFunction()) {
1052       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1053                         << "\n");
1054       UnresolvedCalls.insert(CB);
1055       continue;
1056     }
1057 
1058     // TODO: use the function scope once we have call site AAReturnedValues.
1059     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1060         *this, IRPosition::function(*CB->getCalledFunction()));
1061     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1062                       << RetValAA << "\n");
1063 
1064     // Skip dead ends, thus if we do not know anything about the returned
1065     // call we mark it as unresolved and it will stay that way.
1066     if (!RetValAA.getState().isValidState()) {
1067       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1068                         << "\n");
1069       UnresolvedCalls.insert(CB);
1070       continue;
1071     }
1072 
1073     // Do not try to learn partial information. If the callee has unresolved
1074     // return values we will treat the call as unresolved/opaque.
1075     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1076     if (!RetValAAUnresolvedCalls.empty()) {
1077       UnresolvedCalls.insert(CB);
1078       continue;
1079     }
1080 
1081     // Now check if we can track transitively returned values. If possible, thus
1082     // if all return value can be represented in the current scope, do so.
1083     bool Unresolved = false;
1084     for (auto &RetValAAIt : RetValAA.returned_values()) {
1085       Value *RetVal = RetValAAIt.first;
1086       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1087           isa<Constant>(RetVal))
1088         continue;
1089       // Anything that did not fit in the above categories cannot be resolved,
1090       // mark the call as unresolved.
1091       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1092                            "cannot be translated: "
1093                         << *RetVal << "\n");
1094       UnresolvedCalls.insert(CB);
1095       Unresolved = true;
1096       break;
1097     }
1098 
1099     if (Unresolved)
1100       continue;
1101 
1102     // Now track transitively returned values.
1103     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1104     if (NumRetAA == RetValAA.getNumReturnValues()) {
1105       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1106                            "changed since it was seen last\n");
1107       continue;
1108     }
1109     NumRetAA = RetValAA.getNumReturnValues();
1110 
1111     for (auto &RetValAAIt : RetValAA.returned_values()) {
1112       Value *RetVal = RetValAAIt.first;
1113       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1114         // Arguments are mapped to call site operands and we begin the traversal
1115         // again.
1116         bool Unused = false;
1117         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1118         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1119         continue;
1120       } else if (isa<CallBase>(RetVal)) {
1121         // Call sites are resolved by the callee attribute over time, no need to
1122         // do anything for us.
1123         continue;
1124       } else if (isa<Constant>(RetVal)) {
1125         // Constants are valid everywhere, we can simply take them.
1126         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1127         continue;
1128       }
1129     }
1130   }
1131 
1132   // To avoid modifications to the ReturnedValues map while we iterate over it
1133   // we kept record of potential new entries in a copy map, NewRVsMap.
1134   for (auto &It : NewRVsMap) {
1135     assert(!It.second.empty() && "Entry does not add anything.");
1136     auto &ReturnInsts = ReturnedValues[It.first];
1137     for (ReturnInst *RI : It.second)
1138       if (ReturnInsts.insert(RI)) {
1139         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1140                           << *It.first << " => " << *RI << "\n");
1141         Changed = true;
1142       }
1143   }
1144 
1145   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1146   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1147 }
1148 
1149 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1150   AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1151 
1152   /// See AbstractAttribute::trackStatistics()
1153   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1154 };
1155 
1156 /// Returned values information for a call sites.
1157 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1158   AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}
1159 
1160   /// See AbstractAttribute::initialize(...).
1161   void initialize(Attributor &A) override {
1162     // TODO: Once we have call site specific value information we can provide
1163     //       call site specific liveness information and then it makes
1164     //       sense to specialize attributes for call sites instead of
1165     //       redirecting requests to the callee.
1166     llvm_unreachable("Abstract attributes for returned values are not "
1167                      "supported for call sites yet!");
1168   }
1169 
1170   /// See AbstractAttribute::updateImpl(...).
1171   ChangeStatus updateImpl(Attributor &A) override {
1172     return indicatePessimisticFixpoint();
1173   }
1174 
1175   /// See AbstractAttribute::trackStatistics()
1176   void trackStatistics() const override {}
1177 };
1178 
1179 /// ------------------------ NoSync Function Attribute -------------------------
1180 
1181 struct AANoSyncImpl : AANoSync {
1182   AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}
1183 
1184   const std::string getAsStr() const override {
1185     return getAssumed() ? "nosync" : "may-sync";
1186   }
1187 
1188   /// See AbstractAttribute::updateImpl(...).
1189   ChangeStatus updateImpl(Attributor &A) override;
1190 
1191   /// Helper function used to determine whether an instruction is non-relaxed
1192   /// atomic. In other words, if an atomic instruction does not have unordered
1193   /// or monotonic ordering
1194   static bool isNonRelaxedAtomic(Instruction *I);
1195 
1196   /// Helper function used to determine whether an instruction is volatile.
1197   static bool isVolatile(Instruction *I);
1198 
1199   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1200   /// memset).
1201   static bool isNoSyncIntrinsic(Instruction *I);
1202 };
1203 
1204 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1205   if (!I->isAtomic())
1206     return false;
1207 
1208   AtomicOrdering Ordering;
1209   switch (I->getOpcode()) {
1210   case Instruction::AtomicRMW:
1211     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1212     break;
1213   case Instruction::Store:
1214     Ordering = cast<StoreInst>(I)->getOrdering();
1215     break;
1216   case Instruction::Load:
1217     Ordering = cast<LoadInst>(I)->getOrdering();
1218     break;
1219   case Instruction::Fence: {
1220     auto *FI = cast<FenceInst>(I);
1221     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1222       return false;
1223     Ordering = FI->getOrdering();
1224     break;
1225   }
1226   case Instruction::AtomicCmpXchg: {
1227     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1228     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1229     // Only if both are relaxed, than it can be treated as relaxed.
1230     // Otherwise it is non-relaxed.
1231     if (Success != AtomicOrdering::Unordered &&
1232         Success != AtomicOrdering::Monotonic)
1233       return true;
1234     if (Failure != AtomicOrdering::Unordered &&
1235         Failure != AtomicOrdering::Monotonic)
1236       return true;
1237     return false;
1238   }
1239   default:
1240     llvm_unreachable(
1241         "New atomic operations need to be known in the attributor.");
1242   }
1243 
1244   // Relaxed.
1245   if (Ordering == AtomicOrdering::Unordered ||
1246       Ordering == AtomicOrdering::Monotonic)
1247     return false;
1248   return true;
1249 }
1250 
1251 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1252 /// FIXME: We should ipmrove the handling of intrinsics.
1253 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1254   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1255     switch (II->getIntrinsicID()) {
1256     /// Element wise atomic memory intrinsics are can only be unordered,
1257     /// therefore nosync.
1258     case Intrinsic::memset_element_unordered_atomic:
1259     case Intrinsic::memmove_element_unordered_atomic:
1260     case Intrinsic::memcpy_element_unordered_atomic:
1261       return true;
1262     case Intrinsic::memset:
1263     case Intrinsic::memmove:
1264     case Intrinsic::memcpy:
1265       if (!cast<MemIntrinsic>(II)->isVolatile())
1266         return true;
1267       return false;
1268     default:
1269       return false;
1270     }
1271   }
1272   return false;
1273 }
1274 
1275 bool AANoSyncImpl::isVolatile(Instruction *I) {
1276   assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
1277          "Calls should not be checked here");
1278 
1279   switch (I->getOpcode()) {
1280   case Instruction::AtomicRMW:
1281     return cast<AtomicRMWInst>(I)->isVolatile();
1282   case Instruction::Store:
1283     return cast<StoreInst>(I)->isVolatile();
1284   case Instruction::Load:
1285     return cast<LoadInst>(I)->isVolatile();
1286   case Instruction::AtomicCmpXchg:
1287     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1288   default:
1289     return false;
1290   }
1291 }
1292 
1293 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1294 
1295   auto CheckRWInstForNoSync = [&](Instruction &I) {
1296     /// We are looking for volatile instructions or Non-Relaxed atomics.
1297     /// FIXME: We should improve the handling of intrinsics.
1298 
1299     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1300       return true;
1301 
1302     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
1303       if (ICS.hasFnAttr(Attribute::NoSync))
1304         return true;
1305 
1306       const auto &NoSyncAA =
1307           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
1308       if (NoSyncAA.isAssumedNoSync())
1309         return true;
1310       return false;
1311     }
1312 
1313     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1314       return true;
1315 
1316     return false;
1317   };
1318 
1319   auto CheckForNoSync = [&](Instruction &I) {
1320     // At this point we handled all read/write effects and they are all
1321     // nosync, so they can be skipped.
1322     if (I.mayReadOrWriteMemory())
1323       return true;
1324 
1325     // non-convergent and readnone imply nosync.
1326     return !ImmutableCallSite(&I).isConvergent();
1327   };
1328 
1329   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1330       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1331     return indicatePessimisticFixpoint();
1332 
1333   return ChangeStatus::UNCHANGED;
1334 }
1335 
1336 struct AANoSyncFunction final : public AANoSyncImpl {
1337   AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1338 
1339   /// See AbstractAttribute::trackStatistics()
1340   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1341 };
1342 
1343 /// NoSync attribute deduction for a call sites.
1344 struct AANoSyncCallSite final : AANoSyncImpl {
1345   AANoSyncCallSite(const IRPosition &IRP) : AANoSyncImpl(IRP) {}
1346 
1347   /// See AbstractAttribute::initialize(...).
1348   void initialize(Attributor &A) override {
1349     AANoSyncImpl::initialize(A);
1350     Function *F = getAssociatedFunction();
1351     if (!F)
1352       indicatePessimisticFixpoint();
1353   }
1354 
1355   /// See AbstractAttribute::updateImpl(...).
1356   ChangeStatus updateImpl(Attributor &A) override {
1357     // TODO: Once we have call site specific value information we can provide
1358     //       call site specific liveness information and then it makes
1359     //       sense to specialize attributes for call sites arguments instead of
1360     //       redirecting requests to the callee argument.
1361     Function *F = getAssociatedFunction();
1362     const IRPosition &FnPos = IRPosition::function(*F);
1363     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1364     return clampStateAndIndicateChange(
1365         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1366   }
1367 
1368   /// See AbstractAttribute::trackStatistics()
1369   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1370 };
1371 
1372 /// ------------------------ No-Free Attributes ----------------------------
1373 
1374 struct AANoFreeImpl : public AANoFree {
1375   AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}
1376 
1377   /// See AbstractAttribute::updateImpl(...).
1378   ChangeStatus updateImpl(Attributor &A) override {
1379     auto CheckForNoFree = [&](Instruction &I) {
1380       ImmutableCallSite ICS(&I);
1381       if (ICS.hasFnAttr(Attribute::NoFree))
1382         return true;
1383 
1384       const auto &NoFreeAA =
1385           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
1386       return NoFreeAA.isAssumedNoFree();
1387     };
1388 
1389     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1390       return indicatePessimisticFixpoint();
1391     return ChangeStatus::UNCHANGED;
1392   }
1393 
1394   /// See AbstractAttribute::getAsStr().
1395   const std::string getAsStr() const override {
1396     return getAssumed() ? "nofree" : "may-free";
1397   }
1398 };
1399 
1400 struct AANoFreeFunction final : public AANoFreeImpl {
1401   AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1402 
1403   /// See AbstractAttribute::trackStatistics()
1404   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1405 };
1406 
1407 /// NoFree attribute deduction for a call sites.
1408 struct AANoFreeCallSite final : AANoFreeImpl {
1409   AANoFreeCallSite(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1410 
1411   /// See AbstractAttribute::initialize(...).
1412   void initialize(Attributor &A) override {
1413     AANoFreeImpl::initialize(A);
1414     Function *F = getAssociatedFunction();
1415     if (!F)
1416       indicatePessimisticFixpoint();
1417   }
1418 
1419   /// See AbstractAttribute::updateImpl(...).
1420   ChangeStatus updateImpl(Attributor &A) override {
1421     // TODO: Once we have call site specific value information we can provide
1422     //       call site specific liveness information and then it makes
1423     //       sense to specialize attributes for call sites arguments instead of
1424     //       redirecting requests to the callee argument.
1425     Function *F = getAssociatedFunction();
1426     const IRPosition &FnPos = IRPosition::function(*F);
1427     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1428     return clampStateAndIndicateChange(
1429         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1430   }
1431 
1432   /// See AbstractAttribute::trackStatistics()
1433   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1434 };
1435 
1436 /// NoFree attribute for floating values.
1437 struct AANoFreeFloating : AANoFreeImpl {
1438   AANoFreeFloating(const IRPosition &IRP) : AANoFreeImpl(IRP) {}
1439 
1440   /// See AbstractAttribute::trackStatistics()
1441   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1442 
1443   /// See Abstract Attribute::updateImpl(...).
1444   ChangeStatus updateImpl(Attributor &A) override {
1445     const IRPosition &IRP = getIRPosition();
1446 
1447     const auto &NoFreeAA =
1448         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1449     if (NoFreeAA.isAssumedNoFree())
1450       return ChangeStatus::UNCHANGED;
1451 
1452     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1453     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1454       Instruction *UserI = cast<Instruction>(U.getUser());
1455       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1456         if (CB->isBundleOperand(&U))
1457           return false;
1458         if (!CB->isArgOperand(&U))
1459           return true;
1460         unsigned ArgNo = CB->getArgOperandNo(&U);
1461 
1462         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1463             *this, IRPosition::callsite_argument(*CB, ArgNo));
1464         return NoFreeArg.isAssumedNoFree();
1465       }
1466 
1467       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1468           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1469         Follow = true;
1470         return true;
1471       }
1472       if (isa<ReturnInst>(UserI))
1473         return true;
1474 
1475       // Unknown user.
1476       return false;
1477     };
1478     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1479       return indicatePessimisticFixpoint();
1480 
1481     return ChangeStatus::UNCHANGED;
1482   }
1483 };
1484 
1485 /// NoFree attribute for a call site argument.
1486 struct AANoFreeArgument final : AANoFreeFloating {
1487   AANoFreeArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1488 
1489   /// See AbstractAttribute::trackStatistics()
1490   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1491 };
1492 
1493 /// NoFree attribute for call site arguments.
1494 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1495   AANoFreeCallSiteArgument(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1496 
1497   /// See AbstractAttribute::updateImpl(...).
1498   ChangeStatus updateImpl(Attributor &A) override {
1499     // TODO: Once we have call site specific value information we can provide
1500     //       call site specific liveness information and then it makes
1501     //       sense to specialize attributes for call sites arguments instead of
1502     //       redirecting requests to the callee argument.
1503     Argument *Arg = getAssociatedArgument();
1504     if (!Arg)
1505       return indicatePessimisticFixpoint();
1506     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1507     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1508     return clampStateAndIndicateChange(
1509         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1510   }
1511 
1512   /// See AbstractAttribute::trackStatistics()
1513   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1514 };
1515 
1516 /// NoFree attribute for function return value.
1517 struct AANoFreeReturned final : AANoFreeFloating {
1518   AANoFreeReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {
1519     llvm_unreachable("NoFree is not applicable to function returns!");
1520   }
1521 
1522   /// See AbstractAttribute::initialize(...).
1523   void initialize(Attributor &A) override {
1524     llvm_unreachable("NoFree is not applicable to function returns!");
1525   }
1526 
1527   /// See AbstractAttribute::updateImpl(...).
1528   ChangeStatus updateImpl(Attributor &A) override {
1529     llvm_unreachable("NoFree is not applicable to function returns!");
1530   }
1531 
1532   /// See AbstractAttribute::trackStatistics()
1533   void trackStatistics() const override {}
1534 };
1535 
1536 /// NoFree attribute deduction for a call site return value.
1537 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1538   AANoFreeCallSiteReturned(const IRPosition &IRP) : AANoFreeFloating(IRP) {}
1539 
1540   ChangeStatus manifest(Attributor &A) override {
1541     return ChangeStatus::UNCHANGED;
1542   }
1543   /// See AbstractAttribute::trackStatistics()
1544   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1545 };
1546 
1547 /// ------------------------ NonNull Argument Attribute ------------------------
1548 static int64_t getKnownNonNullAndDerefBytesForUse(
1549     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1550     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1551   TrackUse = false;
1552 
1553   const Value *UseV = U->get();
1554   if (!UseV->getType()->isPointerTy())
1555     return 0;
1556 
1557   Type *PtrTy = UseV->getType();
1558   const Function *F = I->getFunction();
1559   bool NullPointerIsDefined =
1560       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1561   const DataLayout &DL = A.getInfoCache().getDL();
1562   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
1563     if (ICS.isBundleOperand(U))
1564       return 0;
1565 
1566     if (ICS.isCallee(U)) {
1567       IsNonNull |= !NullPointerIsDefined;
1568       return 0;
1569     }
1570 
1571     unsigned ArgNo = ICS.getArgumentNo(U);
1572     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
1573     // As long as we only use known information there is no need to track
1574     // dependences here.
1575     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1576                                                   /* TrackDependence */ false);
1577     IsNonNull |= DerefAA.isKnownNonNull();
1578     return DerefAA.getKnownDereferenceableBytes();
1579   }
1580 
1581   // We need to follow common pointer manipulation uses to the accesses they
1582   // feed into. We can try to be smart to avoid looking through things we do not
1583   // like for now, e.g., non-inbounds GEPs.
1584   if (isa<CastInst>(I)) {
1585     TrackUse = true;
1586     return 0;
1587   }
1588   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1589     if (GEP->hasAllConstantIndices()) {
1590       TrackUse = true;
1591       return 0;
1592     }
1593 
1594   int64_t Offset;
1595   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1596     if (Base == &AssociatedValue &&
1597         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1598       int64_t DerefBytes =
1599           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1600 
1601       IsNonNull |= !NullPointerIsDefined;
1602       return std::max(int64_t(0), DerefBytes);
1603     }
1604   }
1605 
1606   /// Corner case when an offset is 0.
1607   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1608           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1609     if (Offset == 0 && Base == &AssociatedValue &&
1610         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1611       int64_t DerefBytes =
1612           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1613       IsNonNull |= !NullPointerIsDefined;
1614       return std::max(int64_t(0), DerefBytes);
1615     }
1616   }
1617 
1618   return 0;
1619 }
1620 
1621 struct AANonNullImpl : AANonNull {
1622   AANonNullImpl(const IRPosition &IRP)
1623       : AANonNull(IRP),
1624         NullIsDefined(NullPointerIsDefined(
1625             getAnchorScope(),
1626             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1627 
1628   /// See AbstractAttribute::initialize(...).
1629   void initialize(Attributor &A) override {
1630     if (!NullIsDefined &&
1631         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1632                 /* IgnoreSubsumingPositions */ false, &A))
1633       indicateOptimisticFixpoint();
1634     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1635       indicatePessimisticFixpoint();
1636     else
1637       AANonNull::initialize(A);
1638   }
1639 
1640   /// See AAFromMustBeExecutedContext
1641   bool followUse(Attributor &A, const Use *U, const Instruction *I,
1642                  AANonNull::StateType &State) {
1643     bool IsNonNull = false;
1644     bool TrackUse = false;
1645     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1646                                        IsNonNull, TrackUse);
1647     State.setKnown(IsNonNull);
1648     return TrackUse;
1649   }
1650 
1651   /// See AbstractAttribute::getAsStr().
1652   const std::string getAsStr() const override {
1653     return getAssumed() ? "nonnull" : "may-null";
1654   }
1655 
1656   /// Flag to determine if the underlying value can be null and still allow
1657   /// valid accesses.
1658   const bool NullIsDefined;
1659 };
1660 
1661 /// NonNull attribute for a floating value.
1662 struct AANonNullFloating
1663     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1664   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1665   AANonNullFloating(const IRPosition &IRP) : Base(IRP) {}
1666 
1667   /// See AbstractAttribute::updateImpl(...).
1668   ChangeStatus updateImpl(Attributor &A) override {
1669     ChangeStatus Change = Base::updateImpl(A);
1670     if (isKnownNonNull())
1671       return Change;
1672 
1673     if (!NullIsDefined) {
1674       const auto &DerefAA =
1675           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1676       if (DerefAA.getAssumedDereferenceableBytes())
1677         return Change;
1678     }
1679 
1680     const DataLayout &DL = A.getDataLayout();
1681 
1682     DominatorTree *DT = nullptr;
1683     AssumptionCache *AC = nullptr;
1684     InformationCache &InfoCache = A.getInfoCache();
1685     if (const Function *Fn = getAnchorScope()) {
1686       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1687       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1688     }
1689 
1690     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1691                             AANonNull::StateType &T, bool Stripped) -> bool {
1692       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1693       if (!Stripped && this == &AA) {
1694         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1695           T.indicatePessimisticFixpoint();
1696       } else {
1697         // Use abstract attribute information.
1698         const AANonNull::StateType &NS =
1699             static_cast<const AANonNull::StateType &>(AA.getState());
1700         T ^= NS;
1701       }
1702       return T.isValidState();
1703     };
1704 
1705     StateType T;
1706     if (!genericValueTraversal<AANonNull, StateType>(
1707             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1708       return indicatePessimisticFixpoint();
1709 
1710     return clampStateAndIndicateChange(getState(), T);
1711   }
1712 
1713   /// See AbstractAttribute::trackStatistics()
1714   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1715 };
1716 
1717 /// NonNull attribute for function return value.
1718 struct AANonNullReturned final
1719     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1720   AANonNullReturned(const IRPosition &IRP)
1721       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}
1722 
1723   /// See AbstractAttribute::trackStatistics()
1724   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1725 };
1726 
1727 /// NonNull attribute for function argument.
1728 struct AANonNullArgument final
1729     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1730                                                               AANonNullImpl> {
1731   AANonNullArgument(const IRPosition &IRP)
1732       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1733                                                                 AANonNullImpl>(
1734             IRP) {}
1735 
1736   /// See AbstractAttribute::trackStatistics()
1737   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1738 };
1739 
1740 struct AANonNullCallSiteArgument final : AANonNullFloating {
1741   AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}
1742 
1743   /// See AbstractAttribute::trackStatistics()
1744   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1745 };
1746 
1747 /// NonNull attribute for a call site return position.
1748 struct AANonNullCallSiteReturned final
1749     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1750                                                              AANonNullImpl> {
1751   AANonNullCallSiteReturned(const IRPosition &IRP)
1752       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1753                                                                AANonNullImpl>(
1754             IRP) {}
1755 
1756   /// See AbstractAttribute::trackStatistics()
1757   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1758 };
1759 
1760 /// ------------------------ No-Recurse Attributes ----------------------------
1761 
1762 struct AANoRecurseImpl : public AANoRecurse {
1763   AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}
1764 
1765   /// See AbstractAttribute::getAsStr()
1766   const std::string getAsStr() const override {
1767     return getAssumed() ? "norecurse" : "may-recurse";
1768   }
1769 };
1770 
1771 struct AANoRecurseFunction final : AANoRecurseImpl {
1772   AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1773 
1774   /// See AbstractAttribute::initialize(...).
1775   void initialize(Attributor &A) override {
1776     AANoRecurseImpl::initialize(A);
1777     if (const Function *F = getAnchorScope())
1778       if (A.getInfoCache().getSccSize(*F) != 1)
1779         indicatePessimisticFixpoint();
1780   }
1781 
1782   /// See AbstractAttribute::updateImpl(...).
1783   ChangeStatus updateImpl(Attributor &A) override {
1784 
1785     // If all live call sites are known to be no-recurse, we are as well.
1786     auto CallSitePred = [&](AbstractCallSite ACS) {
1787       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1788           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1789           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1790       return NoRecurseAA.isKnownNoRecurse();
1791     };
1792     bool AllCallSitesKnown;
1793     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1794       // If we know all call sites and all are known no-recurse, we are done.
1795       // If all known call sites, which might not be all that exist, are known
1796       // to be no-recurse, we are not done but we can continue to assume
1797       // no-recurse. If one of the call sites we have not visited will become
1798       // live, another update is triggered.
1799       if (AllCallSitesKnown)
1800         indicateOptimisticFixpoint();
1801       return ChangeStatus::UNCHANGED;
1802     }
1803 
1804     // If the above check does not hold anymore we look at the calls.
1805     auto CheckForNoRecurse = [&](Instruction &I) {
1806       ImmutableCallSite ICS(&I);
1807       if (ICS.hasFnAttr(Attribute::NoRecurse))
1808         return true;
1809 
1810       const auto &NoRecurseAA =
1811           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(ICS));
1812       if (!NoRecurseAA.isAssumedNoRecurse())
1813         return false;
1814 
1815       // Recursion to the same function
1816       if (ICS.getCalledFunction() == getAnchorScope())
1817         return false;
1818 
1819       return true;
1820     };
1821 
1822     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1823       return indicatePessimisticFixpoint();
1824     return ChangeStatus::UNCHANGED;
1825   }
1826 
1827   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1828 };
1829 
1830 /// NoRecurse attribute deduction for a call sites.
1831 struct AANoRecurseCallSite final : AANoRecurseImpl {
1832   AANoRecurseCallSite(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}
1833 
1834   /// See AbstractAttribute::initialize(...).
1835   void initialize(Attributor &A) override {
1836     AANoRecurseImpl::initialize(A);
1837     Function *F = getAssociatedFunction();
1838     if (!F)
1839       indicatePessimisticFixpoint();
1840   }
1841 
1842   /// See AbstractAttribute::updateImpl(...).
1843   ChangeStatus updateImpl(Attributor &A) override {
1844     // TODO: Once we have call site specific value information we can provide
1845     //       call site specific liveness information and then it makes
1846     //       sense to specialize attributes for call sites arguments instead of
1847     //       redirecting requests to the callee argument.
1848     Function *F = getAssociatedFunction();
1849     const IRPosition &FnPos = IRPosition::function(*F);
1850     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1851     return clampStateAndIndicateChange(
1852         getState(),
1853         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1854   }
1855 
1856   /// See AbstractAttribute::trackStatistics()
1857   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1858 };
1859 
1860 /// -------------------- Undefined-Behavior Attributes ------------------------
1861 
1862 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1863   AAUndefinedBehaviorImpl(const IRPosition &IRP) : AAUndefinedBehavior(IRP) {}
1864 
1865   /// See AbstractAttribute::updateImpl(...).
1866   // through a pointer (i.e. also branches etc.)
1867   ChangeStatus updateImpl(Attributor &A) override {
1868     const size_t UBPrevSize = KnownUBInsts.size();
1869     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1870 
1871     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1872       // Skip instructions that are already saved.
1873       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1874         return true;
1875 
1876       // If we reach here, we know we have an instruction
1877       // that accesses memory through a pointer operand,
1878       // for which getPointerOperand() should give it to us.
1879       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1880       assert(PtrOp &&
1881              "Expected pointer operand of memory accessing instruction");
1882 
1883       // Either we stopped and the appropriate action was taken,
1884       // or we got back a simplified value to continue.
1885       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1886       if (!SimplifiedPtrOp.hasValue())
1887         return true;
1888       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1889 
1890       // A memory access through a pointer is considered UB
1891       // only if the pointer has constant null value.
1892       // TODO: Expand it to not only check constant values.
1893       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1894         AssumedNoUBInsts.insert(&I);
1895         return true;
1896       }
1897       const Type *PtrTy = PtrOpVal->getType();
1898 
1899       // Because we only consider instructions inside functions,
1900       // assume that a parent function exists.
1901       const Function *F = I.getFunction();
1902 
1903       // A memory access using constant null pointer is only considered UB
1904       // if null pointer is _not_ defined for the target platform.
1905       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1906         AssumedNoUBInsts.insert(&I);
1907       else
1908         KnownUBInsts.insert(&I);
1909       return true;
1910     };
1911 
1912     auto InspectBrInstForUB = [&](Instruction &I) {
1913       // A conditional branch instruction is considered UB if it has `undef`
1914       // condition.
1915 
1916       // Skip instructions that are already saved.
1917       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1918         return true;
1919 
1920       // We know we have a branch instruction.
1921       auto BrInst = cast<BranchInst>(&I);
1922 
1923       // Unconditional branches are never considered UB.
1924       if (BrInst->isUnconditional())
1925         return true;
1926 
1927       // Either we stopped and the appropriate action was taken,
1928       // or we got back a simplified value to continue.
1929       Optional<Value *> SimplifiedCond =
1930           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1931       if (!SimplifiedCond.hasValue())
1932         return true;
1933       AssumedNoUBInsts.insert(&I);
1934       return true;
1935     };
1936 
1937     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
1938                               {Instruction::Load, Instruction::Store,
1939                                Instruction::AtomicCmpXchg,
1940                                Instruction::AtomicRMW},
1941                               /* CheckBBLivenessOnly */ true);
1942     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
1943                               /* CheckBBLivenessOnly */ true);
1944     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
1945         UBPrevSize != KnownUBInsts.size())
1946       return ChangeStatus::CHANGED;
1947     return ChangeStatus::UNCHANGED;
1948   }
1949 
1950   bool isKnownToCauseUB(Instruction *I) const override {
1951     return KnownUBInsts.count(I);
1952   }
1953 
1954   bool isAssumedToCauseUB(Instruction *I) const override {
1955     // In simple words, if an instruction is not in the assumed to _not_
1956     // cause UB, then it is assumed UB (that includes those
1957     // in the KnownUBInsts set). The rest is boilerplate
1958     // is to ensure that it is one of the instructions we test
1959     // for UB.
1960 
1961     switch (I->getOpcode()) {
1962     case Instruction::Load:
1963     case Instruction::Store:
1964     case Instruction::AtomicCmpXchg:
1965     case Instruction::AtomicRMW:
1966       return !AssumedNoUBInsts.count(I);
1967     case Instruction::Br: {
1968       auto BrInst = cast<BranchInst>(I);
1969       if (BrInst->isUnconditional())
1970         return false;
1971       return !AssumedNoUBInsts.count(I);
1972     } break;
1973     default:
1974       return false;
1975     }
1976     return false;
1977   }
1978 
1979   ChangeStatus manifest(Attributor &A) override {
1980     if (KnownUBInsts.empty())
1981       return ChangeStatus::UNCHANGED;
1982     for (Instruction *I : KnownUBInsts)
1983       A.changeToUnreachableAfterManifest(I);
1984     return ChangeStatus::CHANGED;
1985   }
1986 
1987   /// See AbstractAttribute::getAsStr()
1988   const std::string getAsStr() const override {
1989     return getAssumed() ? "undefined-behavior" : "no-ub";
1990   }
1991 
1992   /// Note: The correctness of this analysis depends on the fact that the
1993   /// following 2 sets will stop changing after some point.
1994   /// "Change" here means that their size changes.
1995   /// The size of each set is monotonically increasing
1996   /// (we only add items to them) and it is upper bounded by the number of
1997   /// instructions in the processed function (we can never save more
1998   /// elements in either set than this number). Hence, at some point,
1999   /// they will stop increasing.
2000   /// Consequently, at some point, both sets will have stopped
2001   /// changing, effectively making the analysis reach a fixpoint.
2002 
2003   /// Note: These 2 sets are disjoint and an instruction can be considered
2004   /// one of 3 things:
2005   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2006   ///    the KnownUBInsts set.
2007   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2008   ///    has a reason to assume it).
2009   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2010   ///    could not find a reason to assume or prove that it can cause UB,
2011   ///    hence it assumes it doesn't. We have a set for these instructions
2012   ///    so that we don't reprocess them in every update.
2013   ///    Note however that instructions in this set may cause UB.
2014 
2015 protected:
2016   /// A set of all live instructions _known_ to cause UB.
2017   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2018 
2019 private:
2020   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2021   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2022 
2023   // Should be called on updates in which if we're processing an instruction
2024   // \p I that depends on a value \p V, one of the following has to happen:
2025   // - If the value is assumed, then stop.
2026   // - If the value is known but undef, then consider it UB.
2027   // - Otherwise, do specific processing with the simplified value.
2028   // We return None in the first 2 cases to signify that an appropriate
2029   // action was taken and the caller should stop.
2030   // Otherwise, we return the simplified value that the caller should
2031   // use for specific processing.
2032   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2033                                          Instruction *I) {
2034     const auto &ValueSimplifyAA =
2035         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2036     Optional<Value *> SimplifiedV =
2037         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2038     if (!ValueSimplifyAA.isKnown()) {
2039       // Don't depend on assumed values.
2040       return llvm::None;
2041     }
2042     if (!SimplifiedV.hasValue()) {
2043       // If it is known (which we tested above) but it doesn't have a value,
2044       // then we can assume `undef` and hence the instruction is UB.
2045       KnownUBInsts.insert(I);
2046       return llvm::None;
2047     }
2048     Value *Val = SimplifiedV.getValue();
2049     if (isa<UndefValue>(Val)) {
2050       KnownUBInsts.insert(I);
2051       return llvm::None;
2052     }
2053     return Val;
2054   }
2055 };
2056 
2057 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2058   AAUndefinedBehaviorFunction(const IRPosition &IRP)
2059       : AAUndefinedBehaviorImpl(IRP) {}
2060 
2061   /// See AbstractAttribute::trackStatistics()
2062   void trackStatistics() const override {
2063     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2064                "Number of instructions known to have UB");
2065     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2066         KnownUBInsts.size();
2067   }
2068 };
2069 
2070 /// ------------------------ Will-Return Attributes ----------------------------
2071 
2072 // Helper function that checks whether a function has any cycle which we don't
2073 // know if it is bounded or not.
2074 // Loops with maximum trip count are considered bounded, any other cycle not.
2075 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2076   ScalarEvolution *SE =
2077       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2078   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2079   // If either SCEV or LoopInfo is not available for the function then we assume
2080   // any cycle to be unbounded cycle.
2081   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2082   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2083   if (!SE || !LI) {
2084     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2085       if (SCCI.hasCycle())
2086         return true;
2087     return false;
2088   }
2089 
2090   // If there's irreducible control, the function may contain non-loop cycles.
2091   if (mayContainIrreducibleControl(F, LI))
2092     return true;
2093 
2094   // Any loop that does not have a max trip count is considered unbounded cycle.
2095   for (auto *L : LI->getLoopsInPreorder()) {
2096     if (!SE->getSmallConstantMaxTripCount(L))
2097       return true;
2098   }
2099   return false;
2100 }
2101 
2102 struct AAWillReturnImpl : public AAWillReturn {
2103   AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}
2104 
2105   /// See AbstractAttribute::initialize(...).
2106   void initialize(Attributor &A) override {
2107     AAWillReturn::initialize(A);
2108 
2109     Function *F = getAnchorScope();
2110     if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
2111       indicatePessimisticFixpoint();
2112   }
2113 
2114   /// See AbstractAttribute::updateImpl(...).
2115   ChangeStatus updateImpl(Attributor &A) override {
2116     auto CheckForWillReturn = [&](Instruction &I) {
2117       IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
2118       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2119       if (WillReturnAA.isKnownWillReturn())
2120         return true;
2121       if (!WillReturnAA.isAssumedWillReturn())
2122         return false;
2123       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2124       return NoRecurseAA.isAssumedNoRecurse();
2125     };
2126 
2127     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2128       return indicatePessimisticFixpoint();
2129 
2130     return ChangeStatus::UNCHANGED;
2131   }
2132 
2133   /// See AbstractAttribute::getAsStr()
2134   const std::string getAsStr() const override {
2135     return getAssumed() ? "willreturn" : "may-noreturn";
2136   }
2137 };
2138 
2139 struct AAWillReturnFunction final : AAWillReturnImpl {
2140   AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2141 
2142   /// See AbstractAttribute::trackStatistics()
2143   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2144 };
2145 
2146 /// WillReturn attribute deduction for a call sites.
2147 struct AAWillReturnCallSite final : AAWillReturnImpl {
2148   AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}
2149 
2150   /// See AbstractAttribute::initialize(...).
2151   void initialize(Attributor &A) override {
2152     AAWillReturnImpl::initialize(A);
2153     Function *F = getAssociatedFunction();
2154     if (!F)
2155       indicatePessimisticFixpoint();
2156   }
2157 
2158   /// See AbstractAttribute::updateImpl(...).
2159   ChangeStatus updateImpl(Attributor &A) override {
2160     // TODO: Once we have call site specific value information we can provide
2161     //       call site specific liveness information and then it makes
2162     //       sense to specialize attributes for call sites arguments instead of
2163     //       redirecting requests to the callee argument.
2164     Function *F = getAssociatedFunction();
2165     const IRPosition &FnPos = IRPosition::function(*F);
2166     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2167     return clampStateAndIndicateChange(
2168         getState(),
2169         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2170   }
2171 
2172   /// See AbstractAttribute::trackStatistics()
2173   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2174 };
2175 
2176 /// -------------------AAReachability Attribute--------------------------
2177 
2178 struct AAReachabilityImpl : AAReachability {
2179   AAReachabilityImpl(const IRPosition &IRP) : AAReachability(IRP) {}
2180 
2181   const std::string getAsStr() const override {
2182     // TODO: Return the number of reachable queries.
2183     return "reachable";
2184   }
2185 
2186   /// See AbstractAttribute::initialize(...).
2187   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2188 
2189   /// See AbstractAttribute::updateImpl(...).
2190   ChangeStatus updateImpl(Attributor &A) override {
2191     return indicatePessimisticFixpoint();
2192   }
2193 };
2194 
2195 struct AAReachabilityFunction final : public AAReachabilityImpl {
2196   AAReachabilityFunction(const IRPosition &IRP) : AAReachabilityImpl(IRP) {}
2197 
2198   /// See AbstractAttribute::trackStatistics()
2199   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2200 };
2201 
2202 /// ------------------------ NoAlias Argument Attribute ------------------------
2203 
2204 struct AANoAliasImpl : AANoAlias {
2205   AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {
2206     assert(getAssociatedType()->isPointerTy() &&
2207            "Noalias is a pointer attribute");
2208   }
2209 
2210   const std::string getAsStr() const override {
2211     return getAssumed() ? "noalias" : "may-alias";
2212   }
2213 };
2214 
2215 /// NoAlias attribute for a floating value.
2216 struct AANoAliasFloating final : AANoAliasImpl {
2217   AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2218 
2219   /// See AbstractAttribute::initialize(...).
2220   void initialize(Attributor &A) override {
2221     AANoAliasImpl::initialize(A);
2222     Value *Val = &getAssociatedValue();
2223     do {
2224       CastInst *CI = dyn_cast<CastInst>(Val);
2225       if (!CI)
2226         break;
2227       Value *Base = CI->getOperand(0);
2228       if (Base->getNumUses() != 1)
2229         break;
2230       Val = Base;
2231     } while (true);
2232 
2233     if (!Val->getType()->isPointerTy()) {
2234       indicatePessimisticFixpoint();
2235       return;
2236     }
2237 
2238     if (isa<AllocaInst>(Val))
2239       indicateOptimisticFixpoint();
2240     else if (isa<ConstantPointerNull>(Val) &&
2241              !NullPointerIsDefined(getAnchorScope(),
2242                                    Val->getType()->getPointerAddressSpace()))
2243       indicateOptimisticFixpoint();
2244     else if (Val != &getAssociatedValue()) {
2245       const auto &ValNoAliasAA =
2246           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2247       if (ValNoAliasAA.isKnownNoAlias())
2248         indicateOptimisticFixpoint();
2249     }
2250   }
2251 
2252   /// See AbstractAttribute::updateImpl(...).
2253   ChangeStatus updateImpl(Attributor &A) override {
2254     // TODO: Implement this.
2255     return indicatePessimisticFixpoint();
2256   }
2257 
2258   /// See AbstractAttribute::trackStatistics()
2259   void trackStatistics() const override {
2260     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2261   }
2262 };
2263 
2264 /// NoAlias attribute for an argument.
2265 struct AANoAliasArgument final
2266     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2267   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2268   AANoAliasArgument(const IRPosition &IRP) : Base(IRP) {}
2269 
2270   /// See AbstractAttribute::initialize(...).
2271   void initialize(Attributor &A) override {
2272     Base::initialize(A);
2273     // See callsite argument attribute and callee argument attribute.
2274     if (hasAttr({Attribute::ByVal}))
2275       indicateOptimisticFixpoint();
2276   }
2277 
2278   /// See AbstractAttribute::update(...).
2279   ChangeStatus updateImpl(Attributor &A) override {
2280     // We have to make sure no-alias on the argument does not break
2281     // synchronization when this is a callback argument, see also [1] below.
2282     // If synchronization cannot be affected, we delegate to the base updateImpl
2283     // function, otherwise we give up for now.
2284 
2285     // If the function is no-sync, no-alias cannot break synchronization.
2286     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2287         *this, IRPosition::function_scope(getIRPosition()));
2288     if (NoSyncAA.isAssumedNoSync())
2289       return Base::updateImpl(A);
2290 
2291     // If the argument is read-only, no-alias cannot break synchronization.
2292     const auto &MemBehaviorAA =
2293         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2294     if (MemBehaviorAA.isAssumedReadOnly())
2295       return Base::updateImpl(A);
2296 
2297     // If the argument is never passed through callbacks, no-alias cannot break
2298     // synchronization.
2299     bool AllCallSitesKnown;
2300     if (A.checkForAllCallSites(
2301             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2302             true, AllCallSitesKnown))
2303       return Base::updateImpl(A);
2304 
2305     // TODO: add no-alias but make sure it doesn't break synchronization by
2306     // introducing fake uses. See:
2307     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2308     //     International Workshop on OpenMP 2018,
2309     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2310 
2311     return indicatePessimisticFixpoint();
2312   }
2313 
2314   /// See AbstractAttribute::trackStatistics()
2315   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2316 };
2317 
2318 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2319   AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2320 
2321   /// See AbstractAttribute::initialize(...).
2322   void initialize(Attributor &A) override {
2323     // See callsite argument attribute and callee argument attribute.
2324     ImmutableCallSite ICS(&getAnchorValue());
2325     if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
2326       indicateOptimisticFixpoint();
2327     Value &Val = getAssociatedValue();
2328     if (isa<ConstantPointerNull>(Val) &&
2329         !NullPointerIsDefined(getAnchorScope(),
2330                               Val.getType()->getPointerAddressSpace()))
2331       indicateOptimisticFixpoint();
2332   }
2333 
2334   /// Determine if the underlying value may alias with the call site argument
2335   /// \p OtherArgNo of \p ICS (= the underlying call site).
2336   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2337                             const AAMemoryBehavior &MemBehaviorAA,
2338                             ImmutableCallSite ICS, unsigned OtherArgNo) {
2339     // We do not need to worry about aliasing with the underlying IRP.
2340     if (this->getArgNo() == (int)OtherArgNo)
2341       return false;
2342 
2343     // If it is not a pointer or pointer vector we do not alias.
2344     const Value *ArgOp = ICS.getArgOperand(OtherArgNo);
2345     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2346       return false;
2347 
2348     auto &ICSArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2349         *this, IRPosition::callsite_argument(ICS, OtherArgNo),
2350         /* TrackDependence */ false);
2351 
2352     // If the argument is readnone, there is no read-write aliasing.
2353     if (ICSArgMemBehaviorAA.isAssumedReadNone()) {
2354       A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2355       return false;
2356     }
2357 
2358     // If the argument is readonly and the underlying value is readonly, there
2359     // is no read-write aliasing.
2360     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2361     if (ICSArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2362       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2363       A.recordDependence(ICSArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2364       return false;
2365     }
2366 
2367     // We have to utilize actual alias analysis queries so we need the object.
2368     if (!AAR)
2369       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2370 
2371     // Try to rule it out at the call site.
2372     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2373     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2374                          "callsite arguments: "
2375                       << getAssociatedValue() << " " << *ArgOp << " => "
2376                       << (IsAliasing ? "" : "no-") << "alias \n");
2377 
2378     return IsAliasing;
2379   }
2380 
2381   bool
2382   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2383                                          const AAMemoryBehavior &MemBehaviorAA,
2384                                          const AANoAlias &NoAliasAA) {
2385     // We can deduce "noalias" if the following conditions hold.
2386     // (i)   Associated value is assumed to be noalias in the definition.
2387     // (ii)  Associated value is assumed to be no-capture in all the uses
2388     //       possibly executed before this callsite.
2389     // (iii) There is no other pointer argument which could alias with the
2390     //       value.
2391 
2392     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2393     if (!AssociatedValueIsNoAliasAtDef) {
2394       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2395                         << " is not no-alias at the definition\n");
2396       return false;
2397     }
2398 
2399     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2400 
2401     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2402     auto &NoCaptureAA =
2403         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2404     // Check whether the value is captured in the scope using AANoCapture.
2405     //      Look at CFG and check only uses possibly executed before this
2406     //      callsite.
2407     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2408       Instruction *UserI = cast<Instruction>(U.getUser());
2409 
2410       // If user if curr instr and only use.
2411       if ((UserI == getCtxI()) && (UserI->getNumUses() == 1))
2412         return true;
2413 
2414       const Function *ScopeFn = VIRP.getAnchorScope();
2415       if (ScopeFn) {
2416         const auto &ReachabilityAA =
2417             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2418 
2419         if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI()))
2420           return true;
2421 
2422         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2423           if (CB->isArgOperand(&U)) {
2424 
2425             unsigned ArgNo = CB->getArgOperandNo(&U);
2426 
2427             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2428                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2429 
2430             if (NoCaptureAA.isAssumedNoCapture())
2431               return true;
2432           }
2433         }
2434       }
2435 
2436       // For cases which can potentially have more users
2437       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2438           isa<SelectInst>(U)) {
2439         Follow = true;
2440         return true;
2441       }
2442 
2443       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2444       return false;
2445     };
2446 
2447     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2448       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2449         LLVM_DEBUG(
2450             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2451                    << " cannot be noalias as it is potentially captured\n");
2452         return false;
2453       }
2454     }
2455     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2456 
2457     // Check there is no other pointer argument which could alias with the
2458     // value passed at this call site.
2459     // TODO: AbstractCallSite
2460     ImmutableCallSite ICS(&getAnchorValue());
2461     for (unsigned OtherArgNo = 0; OtherArgNo < ICS.getNumArgOperands();
2462          OtherArgNo++)
2463       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, ICS, OtherArgNo))
2464         return false;
2465 
2466     return true;
2467   }
2468 
2469   /// See AbstractAttribute::updateImpl(...).
2470   ChangeStatus updateImpl(Attributor &A) override {
2471     // If the argument is readnone we are done as there are no accesses via the
2472     // argument.
2473     auto &MemBehaviorAA =
2474         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2475                                      /* TrackDependence */ false);
2476     if (MemBehaviorAA.isAssumedReadNone()) {
2477       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2478       return ChangeStatus::UNCHANGED;
2479     }
2480 
2481     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2482     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2483                                                   /* TrackDependence */ false);
2484 
2485     AAResults *AAR = nullptr;
2486     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2487                                                NoAliasAA)) {
2488       LLVM_DEBUG(
2489           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2490       return ChangeStatus::UNCHANGED;
2491     }
2492 
2493     return indicatePessimisticFixpoint();
2494   }
2495 
2496   /// See AbstractAttribute::trackStatistics()
2497   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2498 };
2499 
2500 /// NoAlias attribute for function return value.
2501 struct AANoAliasReturned final : AANoAliasImpl {
2502   AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2503 
2504   /// See AbstractAttribute::updateImpl(...).
2505   virtual ChangeStatus updateImpl(Attributor &A) override {
2506 
2507     auto CheckReturnValue = [&](Value &RV) -> bool {
2508       if (Constant *C = dyn_cast<Constant>(&RV))
2509         if (C->isNullValue() || isa<UndefValue>(C))
2510           return true;
2511 
2512       /// For now, we can only deduce noalias if we have call sites.
2513       /// FIXME: add more support.
2514       ImmutableCallSite ICS(&RV);
2515       if (!ICS)
2516         return false;
2517 
2518       const IRPosition &RVPos = IRPosition::value(RV);
2519       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2520       if (!NoAliasAA.isAssumedNoAlias())
2521         return false;
2522 
2523       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2524       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2525     };
2526 
2527     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2528       return indicatePessimisticFixpoint();
2529 
2530     return ChangeStatus::UNCHANGED;
2531   }
2532 
2533   /// See AbstractAttribute::trackStatistics()
2534   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2535 };
2536 
2537 /// NoAlias attribute deduction for a call site return value.
2538 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2539   AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
2540 
2541   /// See AbstractAttribute::initialize(...).
2542   void initialize(Attributor &A) override {
2543     AANoAliasImpl::initialize(A);
2544     Function *F = getAssociatedFunction();
2545     if (!F)
2546       indicatePessimisticFixpoint();
2547   }
2548 
2549   /// See AbstractAttribute::updateImpl(...).
2550   ChangeStatus updateImpl(Attributor &A) override {
2551     // TODO: Once we have call site specific value information we can provide
2552     //       call site specific liveness information and then it makes
2553     //       sense to specialize attributes for call sites arguments instead of
2554     //       redirecting requests to the callee argument.
2555     Function *F = getAssociatedFunction();
2556     const IRPosition &FnPos = IRPosition::returned(*F);
2557     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2558     return clampStateAndIndicateChange(
2559         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2560   }
2561 
2562   /// See AbstractAttribute::trackStatistics()
2563   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2564 };
2565 
2566 /// -------------------AAIsDead Function Attribute-----------------------
2567 
2568 struct AAIsDeadValueImpl : public AAIsDead {
2569   AAIsDeadValueImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
2570 
2571   /// See AAIsDead::isAssumedDead().
2572   bool isAssumedDead() const override { return getAssumed(); }
2573 
2574   /// See AAIsDead::isKnownDead().
2575   bool isKnownDead() const override { return getKnown(); }
2576 
2577   /// See AAIsDead::isAssumedDead(BasicBlock *).
2578   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2579 
2580   /// See AAIsDead::isKnownDead(BasicBlock *).
2581   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2582 
2583   /// See AAIsDead::isAssumedDead(Instruction *I).
2584   bool isAssumedDead(const Instruction *I) const override {
2585     return I == getCtxI() && isAssumedDead();
2586   }
2587 
2588   /// See AAIsDead::isKnownDead(Instruction *I).
2589   bool isKnownDead(const Instruction *I) const override {
2590     return isAssumedDead(I) && getKnown();
2591   }
2592 
2593   /// See AbstractAttribute::getAsStr().
2594   const std::string getAsStr() const override {
2595     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2596   }
2597 
2598   /// Check if all uses are assumed dead.
2599   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2600     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2601     // Explicitly set the dependence class to required because we want a long
2602     // chain of N dependent instructions to be considered live as soon as one is
2603     // without going through N update cycles. This is not required for
2604     // correctness.
2605     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2606   }
2607 
2608   /// Determine if \p I is assumed to be side-effect free.
2609   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2610     if (!I || wouldInstructionBeTriviallyDead(I))
2611       return true;
2612 
2613     auto *CB = dyn_cast<CallBase>(I);
2614     if (!CB || isa<IntrinsicInst>(CB))
2615       return false;
2616 
2617     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2618     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, CallIRP);
2619     if (!NoUnwindAA.isAssumedNoUnwind())
2620       return false;
2621 
2622     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, CallIRP);
2623     if (!MemBehaviorAA.isAssumedReadOnly())
2624       return false;
2625 
2626     return true;
2627   }
2628 };
2629 
2630 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2631   AAIsDeadFloating(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2632 
2633   /// See AbstractAttribute::initialize(...).
2634   void initialize(Attributor &A) override {
2635     if (isa<UndefValue>(getAssociatedValue())) {
2636       indicatePessimisticFixpoint();
2637       return;
2638     }
2639 
2640     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2641     if (!isAssumedSideEffectFree(A, I))
2642       indicatePessimisticFixpoint();
2643   }
2644 
2645   /// See AbstractAttribute::updateImpl(...).
2646   ChangeStatus updateImpl(Attributor &A) override {
2647     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2648     if (!isAssumedSideEffectFree(A, I))
2649       return indicatePessimisticFixpoint();
2650 
2651     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2652       return indicatePessimisticFixpoint();
2653     return ChangeStatus::UNCHANGED;
2654   }
2655 
2656   /// See AbstractAttribute::manifest(...).
2657   ChangeStatus manifest(Attributor &A) override {
2658     Value &V = getAssociatedValue();
2659     if (auto *I = dyn_cast<Instruction>(&V)) {
2660       // If we get here we basically know the users are all dead. We check if
2661       // isAssumedSideEffectFree returns true here again because it might not be
2662       // the case and only the users are dead but the instruction (=call) is
2663       // still needed.
2664       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2665         A.deleteAfterManifest(*I);
2666         return ChangeStatus::CHANGED;
2667       }
2668     }
2669     if (V.use_empty())
2670       return ChangeStatus::UNCHANGED;
2671 
2672     bool UsedAssumedInformation = false;
2673     Optional<Constant *> C =
2674         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2675     if (C.hasValue() && C.getValue())
2676       return ChangeStatus::UNCHANGED;
2677 
2678     UndefValue &UV = *UndefValue::get(V.getType());
2679     bool AnyChange = A.changeValueAfterManifest(V, UV);
2680     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2681   }
2682 
2683   /// See AbstractAttribute::trackStatistics()
2684   void trackStatistics() const override {
2685     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2686   }
2687 };
2688 
2689 struct AAIsDeadArgument : public AAIsDeadFloating {
2690   AAIsDeadArgument(const IRPosition &IRP) : AAIsDeadFloating(IRP) {}
2691 
2692   /// See AbstractAttribute::initialize(...).
2693   void initialize(Attributor &A) override {
2694     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2695       indicatePessimisticFixpoint();
2696   }
2697 
2698   /// See AbstractAttribute::manifest(...).
2699   ChangeStatus manifest(Attributor &A) override {
2700     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2701     Argument &Arg = *getAssociatedArgument();
2702     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2703       if (A.registerFunctionSignatureRewrite(
2704               Arg, /* ReplacementTypes */ {},
2705               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2706               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{}))
2707         return ChangeStatus::CHANGED;
2708     return Changed;
2709   }
2710 
2711   /// See AbstractAttribute::trackStatistics()
2712   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2713 };
2714 
2715 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2716   AAIsDeadCallSiteArgument(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2717 
2718   /// See AbstractAttribute::initialize(...).
2719   void initialize(Attributor &A) override {
2720     if (isa<UndefValue>(getAssociatedValue()))
2721       indicatePessimisticFixpoint();
2722   }
2723 
2724   /// See AbstractAttribute::updateImpl(...).
2725   ChangeStatus updateImpl(Attributor &A) override {
2726     // TODO: Once we have call site specific value information we can provide
2727     //       call site specific liveness information and then it makes
2728     //       sense to specialize attributes for call sites arguments instead of
2729     //       redirecting requests to the callee argument.
2730     Argument *Arg = getAssociatedArgument();
2731     if (!Arg)
2732       return indicatePessimisticFixpoint();
2733     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2734     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2735     return clampStateAndIndicateChange(
2736         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2737   }
2738 
2739   /// See AbstractAttribute::manifest(...).
2740   ChangeStatus manifest(Attributor &A) override {
2741     CallBase &CB = cast<CallBase>(getAnchorValue());
2742     Use &U = CB.getArgOperandUse(getArgNo());
2743     assert(!isa<UndefValue>(U.get()) &&
2744            "Expected undef values to be filtered out!");
2745     UndefValue &UV = *UndefValue::get(U->getType());
2746     if (A.changeUseAfterManifest(U, UV))
2747       return ChangeStatus::CHANGED;
2748     return ChangeStatus::UNCHANGED;
2749   }
2750 
2751   /// See AbstractAttribute::trackStatistics()
2752   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2753 };
2754 
2755 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2756   AAIsDeadCallSiteReturned(const IRPosition &IRP)
2757       : AAIsDeadFloating(IRP), IsAssumedSideEffectFree(true) {}
2758 
2759   /// See AAIsDead::isAssumedDead().
2760   bool isAssumedDead() const override {
2761     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2762   }
2763 
2764   /// See AbstractAttribute::initialize(...).
2765   void initialize(Attributor &A) override {
2766     if (isa<UndefValue>(getAssociatedValue())) {
2767       indicatePessimisticFixpoint();
2768       return;
2769     }
2770 
2771     // We track this separately as a secondary state.
2772     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2773   }
2774 
2775   /// See AbstractAttribute::updateImpl(...).
2776   ChangeStatus updateImpl(Attributor &A) override {
2777     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2778     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2779       IsAssumedSideEffectFree = false;
2780       Changed = ChangeStatus::CHANGED;
2781     }
2782 
2783     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2784       return indicatePessimisticFixpoint();
2785     return Changed;
2786   }
2787 
2788   /// See AbstractAttribute::trackStatistics()
2789   void trackStatistics() const override {
2790     if (IsAssumedSideEffectFree)
2791       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2792     else
2793       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2794   }
2795 
2796   /// See AbstractAttribute::getAsStr().
2797   const std::string getAsStr() const override {
2798     return isAssumedDead()
2799                ? "assumed-dead"
2800                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2801   }
2802 
2803 private:
2804   bool IsAssumedSideEffectFree;
2805 };
2806 
2807 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2808   AAIsDeadReturned(const IRPosition &IRP) : AAIsDeadValueImpl(IRP) {}
2809 
2810   /// See AbstractAttribute::updateImpl(...).
2811   ChangeStatus updateImpl(Attributor &A) override {
2812 
2813     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
2814                               {Instruction::Ret});
2815 
2816     auto PredForCallSite = [&](AbstractCallSite ACS) {
2817       if (ACS.isCallbackCall() || !ACS.getInstruction())
2818         return false;
2819       return areAllUsesAssumedDead(A, *ACS.getInstruction());
2820     };
2821 
2822     bool AllCallSitesKnown;
2823     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
2824                                 AllCallSitesKnown))
2825       return indicatePessimisticFixpoint();
2826 
2827     return ChangeStatus::UNCHANGED;
2828   }
2829 
2830   /// See AbstractAttribute::manifest(...).
2831   ChangeStatus manifest(Attributor &A) override {
2832     // TODO: Rewrite the signature to return void?
2833     bool AnyChange = false;
2834     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2835     auto RetInstPred = [&](Instruction &I) {
2836       ReturnInst &RI = cast<ReturnInst>(I);
2837       if (!isa<UndefValue>(RI.getReturnValue()))
2838         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2839       return true;
2840     };
2841     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2842     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2843   }
2844 
2845   /// See AbstractAttribute::trackStatistics()
2846   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2847 };
2848 
2849 struct AAIsDeadFunction : public AAIsDead {
2850   AAIsDeadFunction(const IRPosition &IRP) : AAIsDead(IRP) {}
2851 
2852   /// See AbstractAttribute::initialize(...).
2853   void initialize(Attributor &A) override {
2854     const Function *F = getAnchorScope();
2855     if (F && !F->isDeclaration()) {
2856       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2857       assumeLive(A, F->getEntryBlock());
2858     }
2859   }
2860 
2861   /// See AbstractAttribute::getAsStr().
2862   const std::string getAsStr() const override {
2863     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2864            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
2865            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2866            std::to_string(KnownDeadEnds.size()) + "]";
2867   }
2868 
2869   /// See AbstractAttribute::manifest(...).
2870   ChangeStatus manifest(Attributor &A) override {
2871     assert(getState().isValidState() &&
2872            "Attempted to manifest an invalid state!");
2873 
2874     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2875     Function &F = *getAnchorScope();
2876 
2877     if (AssumedLiveBlocks.empty()) {
2878       A.deleteAfterManifest(F);
2879       return ChangeStatus::CHANGED;
2880     }
2881 
2882     // Flag to determine if we can change an invoke to a call assuming the
2883     // callee is nounwind. This is not possible if the personality of the
2884     // function allows to catch asynchronous exceptions.
2885     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2886 
2887     KnownDeadEnds.set_union(ToBeExploredFrom);
2888     for (const Instruction *DeadEndI : KnownDeadEnds) {
2889       auto *CB = dyn_cast<CallBase>(DeadEndI);
2890       if (!CB)
2891         continue;
2892       const auto &NoReturnAA =
2893           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2894       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2895       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2896         continue;
2897 
2898       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
2899         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
2900       else
2901         A.changeToUnreachableAfterManifest(
2902             const_cast<Instruction *>(DeadEndI->getNextNode()));
2903       HasChanged = ChangeStatus::CHANGED;
2904     }
2905 
2906     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
2907     for (BasicBlock &BB : F)
2908       if (!AssumedLiveBlocks.count(&BB)) {
2909         A.deleteAfterManifest(BB);
2910         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
2911       }
2912 
2913     return HasChanged;
2914   }
2915 
2916   /// See AbstractAttribute::updateImpl(...).
2917   ChangeStatus updateImpl(Attributor &A) override;
2918 
2919   /// See AbstractAttribute::trackStatistics()
2920   void trackStatistics() const override {}
2921 
2922   /// Returns true if the function is assumed dead.
2923   bool isAssumedDead() const override { return false; }
2924 
2925   /// See AAIsDead::isKnownDead().
2926   bool isKnownDead() const override { return false; }
2927 
2928   /// See AAIsDead::isAssumedDead(BasicBlock *).
2929   bool isAssumedDead(const BasicBlock *BB) const override {
2930     assert(BB->getParent() == getAnchorScope() &&
2931            "BB must be in the same anchor scope function.");
2932 
2933     if (!getAssumed())
2934       return false;
2935     return !AssumedLiveBlocks.count(BB);
2936   }
2937 
2938   /// See AAIsDead::isKnownDead(BasicBlock *).
2939   bool isKnownDead(const BasicBlock *BB) const override {
2940     return getKnown() && isAssumedDead(BB);
2941   }
2942 
2943   /// See AAIsDead::isAssumed(Instruction *I).
2944   bool isAssumedDead(const Instruction *I) const override {
2945     assert(I->getParent()->getParent() == getAnchorScope() &&
2946            "Instruction must be in the same anchor scope function.");
2947 
2948     if (!getAssumed())
2949       return false;
2950 
2951     // If it is not in AssumedLiveBlocks then it for sure dead.
2952     // Otherwise, it can still be after noreturn call in a live block.
2953     if (!AssumedLiveBlocks.count(I->getParent()))
2954       return true;
2955 
2956     // If it is not after a liveness barrier it is live.
2957     const Instruction *PrevI = I->getPrevNode();
2958     while (PrevI) {
2959       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2960         return true;
2961       PrevI = PrevI->getPrevNode();
2962     }
2963     return false;
2964   }
2965 
2966   /// See AAIsDead::isKnownDead(Instruction *I).
2967   bool isKnownDead(const Instruction *I) const override {
2968     return getKnown() && isAssumedDead(I);
2969   }
2970 
2971   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2972   /// that internal function called from \p BB should now be looked at.
2973   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2974     if (!AssumedLiveBlocks.insert(&BB).second)
2975       return false;
2976 
2977     // We assume that all of BB is (probably) live now and if there are calls to
2978     // internal functions we will assume that those are now live as well. This
2979     // is a performance optimization for blocks with calls to a lot of internal
2980     // functions. It can however cause dead functions to be treated as live.
2981     for (const Instruction &I : BB)
2982       if (ImmutableCallSite ICS = ImmutableCallSite(&I))
2983         if (const Function *F = ICS.getCalledFunction())
2984           if (F->hasLocalLinkage())
2985             A.markLiveInternalFunction(*F);
2986     return true;
2987   }
2988 
2989   /// Collection of instructions that need to be explored again, e.g., we
2990   /// did assume they do not transfer control to (one of their) successors.
2991   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
2992 
2993   /// Collection of instructions that are known to not transfer control.
2994   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
2995 
2996   /// Collection of all assumed live BasicBlocks.
2997   DenseSet<const BasicBlock *> AssumedLiveBlocks;
2998 };
2999 
3000 static bool
3001 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3002                         AbstractAttribute &AA,
3003                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3004   const IRPosition &IPos = IRPosition::callsite_function(CB);
3005 
3006   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
3007   if (NoReturnAA.isAssumedNoReturn())
3008     return !NoReturnAA.isKnownNoReturn();
3009   if (CB.isTerminator())
3010     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3011   else
3012     AliveSuccessors.push_back(CB.getNextNode());
3013   return false;
3014 }
3015 
3016 static bool
3017 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3018                         AbstractAttribute &AA,
3019                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3020   bool UsedAssumedInformation =
3021       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3022 
3023   // First, determine if we can change an invoke to a call assuming the
3024   // callee is nounwind. This is not possible if the personality of the
3025   // function allows to catch asynchronous exceptions.
3026   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3027     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3028   } else {
3029     const IRPosition &IPos = IRPosition::callsite_function(II);
3030     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
3031     if (AANoUnw.isAssumedNoUnwind()) {
3032       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3033     } else {
3034       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3035     }
3036   }
3037   return UsedAssumedInformation;
3038 }
3039 
3040 static bool
3041 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3042                         AbstractAttribute &AA,
3043                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3044   bool UsedAssumedInformation = false;
3045   if (BI.getNumSuccessors() == 1) {
3046     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3047   } else {
3048     Optional<ConstantInt *> CI = getAssumedConstantInt(
3049         A, *BI.getCondition(), AA, UsedAssumedInformation);
3050     if (!CI.hasValue()) {
3051       // No value yet, assume both edges are dead.
3052     } else if (CI.getValue()) {
3053       const BasicBlock *SuccBB =
3054           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3055       AliveSuccessors.push_back(&SuccBB->front());
3056     } else {
3057       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3058       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3059       UsedAssumedInformation = false;
3060     }
3061   }
3062   return UsedAssumedInformation;
3063 }
3064 
3065 static bool
3066 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3067                         AbstractAttribute &AA,
3068                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3069   bool UsedAssumedInformation = false;
3070   Optional<ConstantInt *> CI =
3071       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3072   if (!CI.hasValue()) {
3073     // No value yet, assume all edges are dead.
3074   } else if (CI.getValue()) {
3075     for (auto &CaseIt : SI.cases()) {
3076       if (CaseIt.getCaseValue() == CI.getValue()) {
3077         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3078         return UsedAssumedInformation;
3079       }
3080     }
3081     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3082     return UsedAssumedInformation;
3083   } else {
3084     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3085       AliveSuccessors.push_back(&SuccBB->front());
3086   }
3087   return UsedAssumedInformation;
3088 }
3089 
3090 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3091   ChangeStatus Change = ChangeStatus::UNCHANGED;
3092 
3093   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3094                     << getAnchorScope()->size() << "] BBs and "
3095                     << ToBeExploredFrom.size() << " exploration points and "
3096                     << KnownDeadEnds.size() << " known dead ends\n");
3097 
3098   // Copy and clear the list of instructions we need to explore from. It is
3099   // refilled with instructions the next update has to look at.
3100   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3101                                                ToBeExploredFrom.end());
3102   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3103 
3104   SmallVector<const Instruction *, 8> AliveSuccessors;
3105   while (!Worklist.empty()) {
3106     const Instruction *I = Worklist.pop_back_val();
3107     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3108 
3109     AliveSuccessors.clear();
3110 
3111     bool UsedAssumedInformation = false;
3112     switch (I->getOpcode()) {
3113     // TODO: look for (assumed) UB to backwards propagate "deadness".
3114     default:
3115       if (I->isTerminator()) {
3116         for (const BasicBlock *SuccBB : successors(I->getParent()))
3117           AliveSuccessors.push_back(&SuccBB->front());
3118       } else {
3119         AliveSuccessors.push_back(I->getNextNode());
3120       }
3121       break;
3122     case Instruction::Call:
3123       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3124                                                        *this, AliveSuccessors);
3125       break;
3126     case Instruction::Invoke:
3127       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3128                                                        *this, AliveSuccessors);
3129       break;
3130     case Instruction::Br:
3131       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3132                                                        *this, AliveSuccessors);
3133       break;
3134     case Instruction::Switch:
3135       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3136                                                        *this, AliveSuccessors);
3137       break;
3138     }
3139 
3140     if (UsedAssumedInformation) {
3141       NewToBeExploredFrom.insert(I);
3142     } else {
3143       Change = ChangeStatus::CHANGED;
3144       if (AliveSuccessors.empty() ||
3145           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3146         KnownDeadEnds.insert(I);
3147     }
3148 
3149     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3150                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3151                       << UsedAssumedInformation << "\n");
3152 
3153     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3154       if (!I->isTerminator()) {
3155         assert(AliveSuccessors.size() == 1 &&
3156                "Non-terminator expected to have a single successor!");
3157         Worklist.push_back(AliveSuccessor);
3158       } else {
3159         if (assumeLive(A, *AliveSuccessor->getParent()))
3160           Worklist.push_back(AliveSuccessor);
3161       }
3162     }
3163   }
3164 
3165   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3166 
3167   // If we know everything is live there is no need to query for liveness.
3168   // Instead, indicating a pessimistic fixpoint will cause the state to be
3169   // "invalid" and all queries to be answered conservatively without lookups.
3170   // To be in this state we have to (1) finished the exploration and (3) not
3171   // discovered any non-trivial dead end and (2) not ruled unreachable code
3172   // dead.
3173   if (ToBeExploredFrom.empty() &&
3174       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3175       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3176         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3177       }))
3178     return indicatePessimisticFixpoint();
3179   return Change;
3180 }
3181 
3182 /// Liveness information for a call sites.
3183 struct AAIsDeadCallSite final : AAIsDeadFunction {
3184   AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadFunction(IRP) {}
3185 
3186   /// See AbstractAttribute::initialize(...).
3187   void initialize(Attributor &A) override {
3188     // TODO: Once we have call site specific value information we can provide
3189     //       call site specific liveness information and then it makes
3190     //       sense to specialize attributes for call sites instead of
3191     //       redirecting requests to the callee.
3192     llvm_unreachable("Abstract attributes for liveness are not "
3193                      "supported for call sites yet!");
3194   }
3195 
3196   /// See AbstractAttribute::updateImpl(...).
3197   ChangeStatus updateImpl(Attributor &A) override {
3198     return indicatePessimisticFixpoint();
3199   }
3200 
3201   /// See AbstractAttribute::trackStatistics()
3202   void trackStatistics() const override {}
3203 };
3204 
3205 /// -------------------- Dereferenceable Argument Attribute --------------------
3206 
3207 template <>
3208 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3209                                                      const DerefState &R) {
3210   ChangeStatus CS0 =
3211       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3212   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3213   return CS0 | CS1;
3214 }
3215 
3216 struct AADereferenceableImpl : AADereferenceable {
3217   AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
3218   using StateType = DerefState;
3219 
3220   void initialize(Attributor &A) override {
3221     SmallVector<Attribute, 4> Attrs;
3222     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3223              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3224     for (const Attribute &Attr : Attrs)
3225       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3226 
3227     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
3228                                        /* TrackDependence */ false);
3229 
3230     const IRPosition &IRP = this->getIRPosition();
3231     bool IsFnInterface = IRP.isFnInterfaceKind();
3232     Function *FnScope = IRP.getAnchorScope();
3233     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope)))
3234       indicatePessimisticFixpoint();
3235   }
3236 
3237   /// See AbstractAttribute::getState()
3238   /// {
3239   StateType &getState() override { return *this; }
3240   const StateType &getState() const override { return *this; }
3241   /// }
3242 
3243   /// Helper function for collecting accessed bytes in must-be-executed-context
3244   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3245                               DerefState &State) {
3246     const Value *UseV = U->get();
3247     if (!UseV->getType()->isPointerTy())
3248       return;
3249 
3250     Type *PtrTy = UseV->getType();
3251     const DataLayout &DL = A.getDataLayout();
3252     int64_t Offset;
3253     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3254             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3255       if (Base == &getAssociatedValue() &&
3256           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3257         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3258         State.addAccessedBytes(Offset, Size);
3259       }
3260     }
3261     return;
3262   }
3263 
3264   /// See AAFromMustBeExecutedContext
3265   bool followUse(Attributor &A, const Use *U, const Instruction *I,
3266                  AADereferenceable::StateType &State) {
3267     bool IsNonNull = false;
3268     bool TrackUse = false;
3269     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3270         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3271 
3272     addAccessedBytesForUse(A, U, I, State);
3273     State.takeKnownDerefBytesMaximum(DerefBytes);
3274     return TrackUse;
3275   }
3276 
3277   /// See AbstractAttribute::manifest(...).
3278   ChangeStatus manifest(Attributor &A) override {
3279     ChangeStatus Change = AADereferenceable::manifest(A);
3280     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3281       removeAttrs({Attribute::DereferenceableOrNull});
3282       return ChangeStatus::CHANGED;
3283     }
3284     return Change;
3285   }
3286 
3287   void getDeducedAttributes(LLVMContext &Ctx,
3288                             SmallVectorImpl<Attribute> &Attrs) const override {
3289     // TODO: Add *_globally support
3290     if (isAssumedNonNull())
3291       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3292           Ctx, getAssumedDereferenceableBytes()));
3293     else
3294       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3295           Ctx, getAssumedDereferenceableBytes()));
3296   }
3297 
3298   /// See AbstractAttribute::getAsStr().
3299   const std::string getAsStr() const override {
3300     if (!getAssumedDereferenceableBytes())
3301       return "unknown-dereferenceable";
3302     return std::string("dereferenceable") +
3303            (isAssumedNonNull() ? "" : "_or_null") +
3304            (isAssumedGlobal() ? "_globally" : "") + "<" +
3305            std::to_string(getKnownDereferenceableBytes()) + "-" +
3306            std::to_string(getAssumedDereferenceableBytes()) + ">";
3307   }
3308 };
3309 
3310 /// Dereferenceable attribute for a floating value.
3311 struct AADereferenceableFloating
3312     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
3313   using Base =
3314       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
3315   AADereferenceableFloating(const IRPosition &IRP) : Base(IRP) {}
3316 
3317   /// See AbstractAttribute::updateImpl(...).
3318   ChangeStatus updateImpl(Attributor &A) override {
3319     ChangeStatus Change = Base::updateImpl(A);
3320 
3321     const DataLayout &DL = A.getDataLayout();
3322 
3323     auto VisitValueCB = [&](Value &V, const Instruction *, DerefState &T,
3324                             bool Stripped) -> bool {
3325       unsigned IdxWidth =
3326           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3327       APInt Offset(IdxWidth, 0);
3328       const Value *Base =
3329           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3330 
3331       const auto &AA =
3332           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3333       int64_t DerefBytes = 0;
3334       if (!Stripped && this == &AA) {
3335         // Use IR information if we did not strip anything.
3336         // TODO: track globally.
3337         bool CanBeNull;
3338         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3339         T.GlobalState.indicatePessimisticFixpoint();
3340       } else {
3341         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3342         DerefBytes = DS.DerefBytesState.getAssumed();
3343         T.GlobalState &= DS.GlobalState;
3344       }
3345 
3346       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3347 
3348       // For now we do not try to "increase" dereferenceability due to negative
3349       // indices as we first have to come up with code to deal with loops and
3350       // for overflows of the dereferenceable bytes.
3351       int64_t OffsetSExt = Offset.getSExtValue();
3352       if (OffsetSExt < 0)
3353         OffsetSExt = 0;
3354 
3355       T.takeAssumedDerefBytesMinimum(
3356           std::max(int64_t(0), DerefBytes - OffsetSExt));
3357 
3358       if (this == &AA) {
3359         if (!Stripped) {
3360           // If nothing was stripped IR information is all we got.
3361           T.takeKnownDerefBytesMaximum(
3362               std::max(int64_t(0), DerefBytes - OffsetSExt));
3363           T.indicatePessimisticFixpoint();
3364         } else if (OffsetSExt > 0) {
3365           // If something was stripped but there is circular reasoning we look
3366           // for the offset. If it is positive we basically decrease the
3367           // dereferenceable bytes in a circluar loop now, which will simply
3368           // drive them down to the known value in a very slow way which we
3369           // can accelerate.
3370           T.indicatePessimisticFixpoint();
3371         }
3372       }
3373 
3374       return T.isValidState();
3375     };
3376 
3377     DerefState T;
3378     if (!genericValueTraversal<AADereferenceable, DerefState>(
3379             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3380       return indicatePessimisticFixpoint();
3381 
3382     return Change | clampStateAndIndicateChange(getState(), T);
3383   }
3384 
3385   /// See AbstractAttribute::trackStatistics()
3386   void trackStatistics() const override {
3387     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3388   }
3389 };
3390 
3391 /// Dereferenceable attribute for a return value.
3392 struct AADereferenceableReturned final
3393     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3394   AADereferenceableReturned(const IRPosition &IRP)
3395       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3396             IRP) {}
3397 
3398   /// See AbstractAttribute::trackStatistics()
3399   void trackStatistics() const override {
3400     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3401   }
3402 };
3403 
3404 /// Dereferenceable attribute for an argument
3405 struct AADereferenceableArgument final
3406     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3407           AADereferenceable, AADereferenceableImpl> {
3408   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3409       AADereferenceable, AADereferenceableImpl>;
3410   AADereferenceableArgument(const IRPosition &IRP) : Base(IRP) {}
3411 
3412   /// See AbstractAttribute::trackStatistics()
3413   void trackStatistics() const override {
3414     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3415   }
3416 };
3417 
3418 /// Dereferenceable attribute for a call site argument.
3419 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3420   AADereferenceableCallSiteArgument(const IRPosition &IRP)
3421       : AADereferenceableFloating(IRP) {}
3422 
3423   /// See AbstractAttribute::trackStatistics()
3424   void trackStatistics() const override {
3425     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3426   }
3427 };
3428 
3429 /// Dereferenceable attribute deduction for a call site return value.
3430 struct AADereferenceableCallSiteReturned final
3431     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3432           AADereferenceable, AADereferenceableImpl> {
3433   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3434       AADereferenceable, AADereferenceableImpl>;
3435   AADereferenceableCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3436 
3437   /// See AbstractAttribute::trackStatistics()
3438   void trackStatistics() const override {
3439     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3440   }
3441 };
3442 
3443 // ------------------------ Align Argument Attribute ------------------------
3444 
3445 /// \p Ptr is accessed so we can get alignment information if the ABI requires
3446 /// the element type to be aligned.
3447 static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr,
3448                                                    const DataLayout &DL) {
3449   MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL);
3450   Type *ElementTy = Ptr->getType()->getPointerElementType();
3451   if (ElementTy->isSized())
3452     KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy));
3453   return KnownAlignment;
3454 }
3455 
3456 static unsigned getKnownAlignForUse(Attributor &A,
3457                                     AbstractAttribute &QueryingAA,
3458                                     Value &AssociatedValue, const Use *U,
3459                                     const Instruction *I, bool &TrackUse) {
3460   // We need to follow common pointer manipulation uses to the accesses they
3461   // feed into.
3462   if (isa<CastInst>(I)) {
3463     // Follow all but ptr2int casts.
3464     TrackUse = !isa<PtrToIntInst>(I);
3465     return 0;
3466   }
3467   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3468     if (GEP->hasAllConstantIndices()) {
3469       TrackUse = true;
3470       return 0;
3471     }
3472   }
3473 
3474   MaybeAlign MA;
3475   if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
3476     if (ICS.isBundleOperand(U) || ICS.isCallee(U))
3477       return 0;
3478 
3479     unsigned ArgNo = ICS.getArgumentNo(U);
3480     IRPosition IRP = IRPosition::callsite_argument(ICS, ArgNo);
3481     // As long as we only use known information there is no need to track
3482     // dependences here.
3483     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3484                                         /* TrackDependence */ false);
3485     MA = MaybeAlign(AlignAA.getKnownAlign());
3486   }
3487 
3488   const DataLayout &DL = A.getDataLayout();
3489   const Value *UseV = U->get();
3490   if (auto *SI = dyn_cast<StoreInst>(I)) {
3491     if (SI->getPointerOperand() == UseV) {
3492       if (unsigned SIAlign = SI->getAlignment())
3493         MA = MaybeAlign(SIAlign);
3494       else
3495         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3496     }
3497   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3498     if (LI->getPointerOperand() == UseV) {
3499       if (unsigned LIAlign = LI->getAlignment())
3500         MA = MaybeAlign(LIAlign);
3501       else
3502         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3503     }
3504   }
3505 
3506   if (!MA.hasValue() || MA <= 1)
3507     return 0;
3508 
3509   unsigned Alignment = MA->value();
3510   int64_t Offset;
3511 
3512   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3513     if (Base == &AssociatedValue) {
3514       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3515       // So we can say that the maximum power of two which is a divisor of
3516       // gcd(Offset, Alignment) is an alignment.
3517 
3518       uint32_t gcd =
3519           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3520       Alignment = llvm::PowerOf2Floor(gcd);
3521     }
3522   }
3523 
3524   return Alignment;
3525 }
3526 
3527 struct AAAlignImpl : AAAlign {
3528   AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
3529 
3530   /// See AbstractAttribute::initialize(...).
3531   void initialize(Attributor &A) override {
3532     SmallVector<Attribute, 4> Attrs;
3533     getAttrs({Attribute::Alignment}, Attrs);
3534     for (const Attribute &Attr : Attrs)
3535       takeKnownMaximum(Attr.getValueAsInt());
3536 
3537     if (getIRPosition().isFnInterfaceKind() &&
3538         (!getAnchorScope() ||
3539          !A.isFunctionIPOAmendable(*getAssociatedFunction())))
3540       indicatePessimisticFixpoint();
3541   }
3542 
3543   /// See AbstractAttribute::manifest(...).
3544   ChangeStatus manifest(Attributor &A) override {
3545     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3546 
3547     // Check for users that allow alignment annotations.
3548     Value &AssociatedValue = getAssociatedValue();
3549     for (const Use &U : AssociatedValue.uses()) {
3550       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3551         if (SI->getPointerOperand() == &AssociatedValue)
3552           if (SI->getAlignment() < getAssumedAlign()) {
3553             STATS_DECLTRACK(AAAlign, Store,
3554                             "Number of times alignment added to a store");
3555             SI->setAlignment(Align(getAssumedAlign()));
3556             LoadStoreChanged = ChangeStatus::CHANGED;
3557           }
3558       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3559         if (LI->getPointerOperand() == &AssociatedValue)
3560           if (LI->getAlignment() < getAssumedAlign()) {
3561             LI->setAlignment(Align(getAssumedAlign()));
3562             STATS_DECLTRACK(AAAlign, Load,
3563                             "Number of times alignment added to a load");
3564             LoadStoreChanged = ChangeStatus::CHANGED;
3565           }
3566       }
3567     }
3568 
3569     ChangeStatus Changed = AAAlign::manifest(A);
3570 
3571     MaybeAlign InheritAlign =
3572         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3573     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3574       return LoadStoreChanged;
3575     return Changed | LoadStoreChanged;
3576   }
3577 
3578   // TODO: Provide a helper to determine the implied ABI alignment and check in
3579   //       the existing manifest method and a new one for AAAlignImpl that value
3580   //       to avoid making the alignment explicit if it did not improve.
3581 
3582   /// See AbstractAttribute::getDeducedAttributes
3583   virtual void
3584   getDeducedAttributes(LLVMContext &Ctx,
3585                        SmallVectorImpl<Attribute> &Attrs) const override {
3586     if (getAssumedAlign() > 1)
3587       Attrs.emplace_back(
3588           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3589   }
3590   /// See AAFromMustBeExecutedContext
3591   bool followUse(Attributor &A, const Use *U, const Instruction *I,
3592                  AAAlign::StateType &State) {
3593     bool TrackUse = false;
3594 
3595     unsigned int KnownAlign =
3596         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3597     State.takeKnownMaximum(KnownAlign);
3598 
3599     return TrackUse;
3600   }
3601 
3602   /// See AbstractAttribute::getAsStr().
3603   const std::string getAsStr() const override {
3604     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3605                                 "-" + std::to_string(getAssumedAlign()) + ">")
3606                              : "unknown-align";
3607   }
3608 };
3609 
3610 /// Align attribute for a floating value.
3611 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3612   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3613   AAAlignFloating(const IRPosition &IRP) : Base(IRP) {}
3614 
3615   /// See AbstractAttribute::updateImpl(...).
3616   ChangeStatus updateImpl(Attributor &A) override {
3617     Base::updateImpl(A);
3618 
3619     const DataLayout &DL = A.getDataLayout();
3620 
3621     auto VisitValueCB = [&](Value &V, const Instruction *,
3622                             AAAlign::StateType &T, bool Stripped) -> bool {
3623       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3624       if (!Stripped && this == &AA) {
3625         // Use only IR information if we did not strip anything.
3626         const MaybeAlign PA = V.getPointerAlignment(DL);
3627         T.takeKnownMaximum(PA ? PA->value() : 0);
3628         T.indicatePessimisticFixpoint();
3629       } else {
3630         // Use abstract attribute information.
3631         const AAAlign::StateType &DS =
3632             static_cast<const AAAlign::StateType &>(AA.getState());
3633         T ^= DS;
3634       }
3635       return T.isValidState();
3636     };
3637 
3638     StateType T;
3639     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3640                                                    VisitValueCB, getCtxI()))
3641       return indicatePessimisticFixpoint();
3642 
3643     // TODO: If we know we visited all incoming values, thus no are assumed
3644     // dead, we can take the known information from the state T.
3645     return clampStateAndIndicateChange(getState(), T);
3646   }
3647 
3648   /// See AbstractAttribute::trackStatistics()
3649   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3650 };
3651 
3652 /// Align attribute for function return value.
3653 struct AAAlignReturned final
3654     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3655   AAAlignReturned(const IRPosition &IRP)
3656       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
3657 
3658   /// See AbstractAttribute::trackStatistics()
3659   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3660 };
3661 
3662 /// Align attribute for function argument.
3663 struct AAAlignArgument final
3664     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3665                                                               AAAlignImpl> {
3666   using Base =
3667       AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3668                                                               AAAlignImpl>;
3669   AAAlignArgument(const IRPosition &IRP) : Base(IRP) {}
3670 
3671   /// See AbstractAttribute::manifest(...).
3672   ChangeStatus manifest(Attributor &A) override {
3673     // If the associated argument is involved in a must-tail call we give up
3674     // because we would need to keep the argument alignments of caller and
3675     // callee in-sync. Just does not seem worth the trouble right now.
3676     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3677       return ChangeStatus::UNCHANGED;
3678     return Base::manifest(A);
3679   }
3680 
3681   /// See AbstractAttribute::trackStatistics()
3682   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3683 };
3684 
3685 struct AAAlignCallSiteArgument final : AAAlignFloating {
3686   AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
3687 
3688   /// See AbstractAttribute::manifest(...).
3689   ChangeStatus manifest(Attributor &A) override {
3690     // If the associated argument is involved in a must-tail call we give up
3691     // because we would need to keep the argument alignments of caller and
3692     // callee in-sync. Just does not seem worth the trouble right now.
3693     if (Argument *Arg = getAssociatedArgument())
3694       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3695         return ChangeStatus::UNCHANGED;
3696     ChangeStatus Changed = AAAlignImpl::manifest(A);
3697     MaybeAlign InheritAlign =
3698         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3699     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3700       Changed = ChangeStatus::UNCHANGED;
3701     return Changed;
3702   }
3703 
3704   /// See AbstractAttribute::updateImpl(Attributor &A).
3705   ChangeStatus updateImpl(Attributor &A) override {
3706     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3707     if (Argument *Arg = getAssociatedArgument()) {
3708       // We only take known information from the argument
3709       // so we do not need to track a dependence.
3710       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3711           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3712       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3713     }
3714     return Changed;
3715   }
3716 
3717   /// See AbstractAttribute::trackStatistics()
3718   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3719 };
3720 
3721 /// Align attribute deduction for a call site return value.
3722 struct AAAlignCallSiteReturned final
3723     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3724                                                              AAAlignImpl> {
3725   using Base =
3726       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3727                                                              AAAlignImpl>;
3728   AAAlignCallSiteReturned(const IRPosition &IRP) : Base(IRP) {}
3729 
3730   /// See AbstractAttribute::initialize(...).
3731   void initialize(Attributor &A) override {
3732     Base::initialize(A);
3733     Function *F = getAssociatedFunction();
3734     if (!F)
3735       indicatePessimisticFixpoint();
3736   }
3737 
3738   /// See AbstractAttribute::trackStatistics()
3739   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3740 };
3741 
3742 /// ------------------ Function No-Return Attribute ----------------------------
3743 struct AANoReturnImpl : public AANoReturn {
3744   AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
3745 
3746   /// See AbstractAttribute::initialize(...).
3747   void initialize(Attributor &A) override {
3748     AANoReturn::initialize(A);
3749     Function *F = getAssociatedFunction();
3750     if (!F)
3751       indicatePessimisticFixpoint();
3752   }
3753 
3754   /// See AbstractAttribute::getAsStr().
3755   const std::string getAsStr() const override {
3756     return getAssumed() ? "noreturn" : "may-return";
3757   }
3758 
3759   /// See AbstractAttribute::updateImpl(Attributor &A).
3760   virtual ChangeStatus updateImpl(Attributor &A) override {
3761     auto CheckForNoReturn = [](Instruction &) { return false; };
3762     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3763                                    {(unsigned)Instruction::Ret}))
3764       return indicatePessimisticFixpoint();
3765     return ChangeStatus::UNCHANGED;
3766   }
3767 };
3768 
3769 struct AANoReturnFunction final : AANoReturnImpl {
3770   AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3771 
3772   /// See AbstractAttribute::trackStatistics()
3773   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3774 };
3775 
3776 /// NoReturn attribute deduction for a call sites.
3777 struct AANoReturnCallSite final : AANoReturnImpl {
3778   AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
3779 
3780   /// See AbstractAttribute::updateImpl(...).
3781   ChangeStatus updateImpl(Attributor &A) override {
3782     // TODO: Once we have call site specific value information we can provide
3783     //       call site specific liveness information and then it makes
3784     //       sense to specialize attributes for call sites arguments instead of
3785     //       redirecting requests to the callee argument.
3786     Function *F = getAssociatedFunction();
3787     const IRPosition &FnPos = IRPosition::function(*F);
3788     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3789     return clampStateAndIndicateChange(
3790         getState(),
3791         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3792   }
3793 
3794   /// See AbstractAttribute::trackStatistics()
3795   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3796 };
3797 
3798 /// ----------------------- Variable Capturing ---------------------------------
3799 
3800 /// A class to hold the state of for no-capture attributes.
3801 struct AANoCaptureImpl : public AANoCapture {
3802   AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}
3803 
3804   /// See AbstractAttribute::initialize(...).
3805   void initialize(Attributor &A) override {
3806     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3807       indicateOptimisticFixpoint();
3808       return;
3809     }
3810     Function *AnchorScope = getAnchorScope();
3811     if (isFnInterfaceKind() &&
3812         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3813       indicatePessimisticFixpoint();
3814       return;
3815     }
3816 
3817     // You cannot "capture" null in the default address space.
3818     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3819         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3820       indicateOptimisticFixpoint();
3821       return;
3822     }
3823 
3824     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3825 
3826     // Check what state the associated function can actually capture.
3827     if (F)
3828       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3829     else
3830       indicatePessimisticFixpoint();
3831   }
3832 
3833   /// See AbstractAttribute::updateImpl(...).
3834   ChangeStatus updateImpl(Attributor &A) override;
3835 
3836   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3837   virtual void
3838   getDeducedAttributes(LLVMContext &Ctx,
3839                        SmallVectorImpl<Attribute> &Attrs) const override {
3840     if (!isAssumedNoCaptureMaybeReturned())
3841       return;
3842 
3843     if (getArgNo() >= 0) {
3844       if (isAssumedNoCapture())
3845         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3846       else if (ManifestInternal)
3847         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3848     }
3849   }
3850 
3851   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3852   /// depending on the ability of the function associated with \p IRP to capture
3853   /// state in memory and through "returning/throwing", respectively.
3854   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3855                                                    const Function &F,
3856                                                    BitIntegerState &State) {
3857     // TODO: Once we have memory behavior attributes we should use them here.
3858 
3859     // If we know we cannot communicate or write to memory, we do not care about
3860     // ptr2int anymore.
3861     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3862         F.getReturnType()->isVoidTy()) {
3863       State.addKnownBits(NO_CAPTURE);
3864       return;
3865     }
3866 
3867     // A function cannot capture state in memory if it only reads memory, it can
3868     // however return/throw state and the state might be influenced by the
3869     // pointer value, e.g., loading from a returned pointer might reveal a bit.
3870     if (F.onlyReadsMemory())
3871       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3872 
3873     // A function cannot communicate state back if it does not through
3874     // exceptions and doesn not return values.
3875     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3876       State.addKnownBits(NOT_CAPTURED_IN_RET);
3877 
3878     // Check existing "returned" attributes.
3879     int ArgNo = IRP.getArgNo();
3880     if (F.doesNotThrow() && ArgNo >= 0) {
3881       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3882         if (F.hasParamAttribute(u, Attribute::Returned)) {
3883           if (u == unsigned(ArgNo))
3884             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3885           else if (F.onlyReadsMemory())
3886             State.addKnownBits(NO_CAPTURE);
3887           else
3888             State.addKnownBits(NOT_CAPTURED_IN_RET);
3889           break;
3890         }
3891     }
3892   }
3893 
3894   /// See AbstractState::getAsStr().
3895   const std::string getAsStr() const override {
3896     if (isKnownNoCapture())
3897       return "known not-captured";
3898     if (isAssumedNoCapture())
3899       return "assumed not-captured";
3900     if (isKnownNoCaptureMaybeReturned())
3901       return "known not-captured-maybe-returned";
3902     if (isAssumedNoCaptureMaybeReturned())
3903       return "assumed not-captured-maybe-returned";
3904     return "assumed-captured";
3905   }
3906 };
3907 
3908 /// Attributor-aware capture tracker.
3909 struct AACaptureUseTracker final : public CaptureTracker {
3910 
3911   /// Create a capture tracker that can lookup in-flight abstract attributes
3912   /// through the Attributor \p A.
3913   ///
3914   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3915   /// search is stopped. If a use leads to a return instruction,
3916   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3917   /// If a use leads to a ptr2int which may capture the value,
3918   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3919   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3920   /// set. All values in \p PotentialCopies are later tracked as well. For every
3921   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3922   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3923   /// conservatively set to true.
3924   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3925                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3926                       SmallVectorImpl<const Value *> &PotentialCopies,
3927                       unsigned &RemainingUsesToExplore)
3928       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3929         PotentialCopies(PotentialCopies),
3930         RemainingUsesToExplore(RemainingUsesToExplore) {}
3931 
3932   /// Determine if \p V maybe captured. *Also updates the state!*
3933   bool valueMayBeCaptured(const Value *V) {
3934     if (V->getType()->isPointerTy()) {
3935       PointerMayBeCaptured(V, this);
3936     } else {
3937       State.indicatePessimisticFixpoint();
3938     }
3939     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3940   }
3941 
3942   /// See CaptureTracker::tooManyUses().
3943   void tooManyUses() override {
3944     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3945   }
3946 
3947   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3948     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3949       return true;
3950     const auto &DerefAA = A.getAAFor<AADereferenceable>(
3951         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
3952         DepClassTy::OPTIONAL);
3953     return DerefAA.getAssumedDereferenceableBytes();
3954   }
3955 
3956   /// See CaptureTracker::captured(...).
3957   bool captured(const Use *U) override {
3958     Instruction *UInst = cast<Instruction>(U->getUser());
3959     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3960                       << "\n");
3961 
3962     // Because we may reuse the tracker multiple times we keep track of the
3963     // number of explored uses ourselves as well.
3964     if (RemainingUsesToExplore-- == 0) {
3965       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3966       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3967                           /* Return */ true);
3968     }
3969 
3970     // Deal with ptr2int by following uses.
3971     if (isa<PtrToIntInst>(UInst)) {
3972       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3973       return valueMayBeCaptured(UInst);
3974     }
3975 
3976     // Explicitly catch return instructions.
3977     if (isa<ReturnInst>(UInst))
3978       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3979                           /* Return */ true);
3980 
3981     // For now we only use special logic for call sites. However, the tracker
3982     // itself knows about a lot of other non-capturing cases already.
3983     CallSite CS(UInst);
3984     if (!CS || !CS.isArgOperand(U))
3985       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3986                           /* Return */ true);
3987 
3988     unsigned ArgNo = CS.getArgumentNo(U);
3989     const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
3990     // If we have a abstract no-capture attribute for the argument we can use
3991     // it to justify a non-capture attribute here. This allows recursion!
3992     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3993     if (ArgNoCaptureAA.isAssumedNoCapture())
3994       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3995                           /* Return */ false);
3996     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3997       addPotentialCopy(CS);
3998       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3999                           /* Return */ false);
4000     }
4001 
4002     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4003     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4004                         /* Return */ true);
4005   }
4006 
4007   /// Register \p CS as potential copy of the value we are checking.
4008   void addPotentialCopy(CallSite CS) {
4009     PotentialCopies.push_back(CS.getInstruction());
4010   }
4011 
4012   /// See CaptureTracker::shouldExplore(...).
4013   bool shouldExplore(const Use *U) override {
4014     // Check liveness and ignore droppable users.
4015     return !U->getUser()->isDroppable() &&
4016            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4017   }
4018 
4019   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4020   /// \p CapturedInRet, then return the appropriate value for use in the
4021   /// CaptureTracker::captured() interface.
4022   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4023                     bool CapturedInRet) {
4024     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4025                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4026     if (CapturedInMem)
4027       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4028     if (CapturedInInt)
4029       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4030     if (CapturedInRet)
4031       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4032     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4033   }
4034 
4035 private:
4036   /// The attributor providing in-flight abstract attributes.
4037   Attributor &A;
4038 
4039   /// The abstract attribute currently updated.
4040   AANoCapture &NoCaptureAA;
4041 
4042   /// The abstract liveness state.
4043   const AAIsDead &IsDeadAA;
4044 
4045   /// The state currently updated.
4046   AANoCapture::StateType &State;
4047 
4048   /// Set of potential copies of the tracked value.
4049   SmallVectorImpl<const Value *> &PotentialCopies;
4050 
4051   /// Global counter to limit the number of explored uses.
4052   unsigned &RemainingUsesToExplore;
4053 };
4054 
4055 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4056   const IRPosition &IRP = getIRPosition();
4057   const Value *V =
4058       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4059   if (!V)
4060     return indicatePessimisticFixpoint();
4061 
4062   const Function *F =
4063       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4064   assert(F && "Expected a function!");
4065   const IRPosition &FnPos = IRPosition::function(*F);
4066   const auto &IsDeadAA =
4067       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4068 
4069   AANoCapture::StateType T;
4070 
4071   // Readonly means we cannot capture through memory.
4072   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
4073       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4074   if (FnMemAA.isAssumedReadOnly()) {
4075     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4076     if (FnMemAA.isKnownReadOnly())
4077       addKnownBits(NOT_CAPTURED_IN_MEM);
4078   }
4079 
4080   // Make sure all returned values are different than the underlying value.
4081   // TODO: we could do this in a more sophisticated way inside
4082   //       AAReturnedValues, e.g., track all values that escape through returns
4083   //       directly somehow.
4084   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4085     bool SeenConstant = false;
4086     for (auto &It : RVAA.returned_values()) {
4087       if (isa<Constant>(It.first)) {
4088         if (SeenConstant)
4089           return false;
4090         SeenConstant = true;
4091       } else if (!isa<Argument>(It.first) ||
4092                  It.first == getAssociatedArgument())
4093         return false;
4094     }
4095     return true;
4096   };
4097 
4098   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4099       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4100   if (NoUnwindAA.isAssumedNoUnwind()) {
4101     bool IsVoidTy = F->getReturnType()->isVoidTy();
4102     const AAReturnedValues *RVAA =
4103         IsVoidTy ? nullptr
4104                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4105                                                  /* TrackDependence */ true,
4106                                                  DepClassTy::OPTIONAL);
4107     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4108       T.addKnownBits(NOT_CAPTURED_IN_RET);
4109       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4110         return ChangeStatus::UNCHANGED;
4111       if (NoUnwindAA.isKnownNoUnwind() &&
4112           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4113         addKnownBits(NOT_CAPTURED_IN_RET);
4114         if (isKnown(NOT_CAPTURED_IN_MEM))
4115           return indicateOptimisticFixpoint();
4116       }
4117     }
4118   }
4119 
4120   // Use the CaptureTracker interface and logic with the specialized tracker,
4121   // defined in AACaptureUseTracker, that can look at in-flight abstract
4122   // attributes and directly updates the assumed state.
4123   SmallVector<const Value *, 4> PotentialCopies;
4124   unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
4125   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4126                               RemainingUsesToExplore);
4127 
4128   // Check all potential copies of the associated value until we can assume
4129   // none will be captured or we have to assume at least one might be.
4130   unsigned Idx = 0;
4131   PotentialCopies.push_back(V);
4132   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4133     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4134 
4135   AANoCapture::StateType &S = getState();
4136   auto Assumed = S.getAssumed();
4137   S.intersectAssumedBits(T.getAssumed());
4138   if (!isAssumedNoCaptureMaybeReturned())
4139     return indicatePessimisticFixpoint();
4140   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4141                                    : ChangeStatus::CHANGED;
4142 }
4143 
4144 /// NoCapture attribute for function arguments.
4145 struct AANoCaptureArgument final : AANoCaptureImpl {
4146   AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4147 
4148   /// See AbstractAttribute::trackStatistics()
4149   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4150 };
4151 
4152 /// NoCapture attribute for call site arguments.
4153 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4154   AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4155 
4156   /// See AbstractAttribute::initialize(...).
4157   void initialize(Attributor &A) override {
4158     if (Argument *Arg = getAssociatedArgument())
4159       if (Arg->hasByValAttr())
4160         indicateOptimisticFixpoint();
4161     AANoCaptureImpl::initialize(A);
4162   }
4163 
4164   /// See AbstractAttribute::updateImpl(...).
4165   ChangeStatus updateImpl(Attributor &A) override {
4166     // TODO: Once we have call site specific value information we can provide
4167     //       call site specific liveness information and then it makes
4168     //       sense to specialize attributes for call sites arguments instead of
4169     //       redirecting requests to the callee argument.
4170     Argument *Arg = getAssociatedArgument();
4171     if (!Arg)
4172       return indicatePessimisticFixpoint();
4173     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4174     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4175     return clampStateAndIndicateChange(
4176         getState(),
4177         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4178   }
4179 
4180   /// See AbstractAttribute::trackStatistics()
4181   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4182 };
4183 
4184 /// NoCapture attribute for floating values.
4185 struct AANoCaptureFloating final : AANoCaptureImpl {
4186   AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4187 
4188   /// See AbstractAttribute::trackStatistics()
4189   void trackStatistics() const override {
4190     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4191   }
4192 };
4193 
4194 /// NoCapture attribute for function return value.
4195 struct AANoCaptureReturned final : AANoCaptureImpl {
4196   AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
4197     llvm_unreachable("NoCapture is not applicable to function returns!");
4198   }
4199 
4200   /// See AbstractAttribute::initialize(...).
4201   void initialize(Attributor &A) override {
4202     llvm_unreachable("NoCapture is not applicable to function returns!");
4203   }
4204 
4205   /// See AbstractAttribute::updateImpl(...).
4206   ChangeStatus updateImpl(Attributor &A) override {
4207     llvm_unreachable("NoCapture is not applicable to function returns!");
4208   }
4209 
4210   /// See AbstractAttribute::trackStatistics()
4211   void trackStatistics() const override {}
4212 };
4213 
4214 /// NoCapture attribute deduction for a call site return value.
4215 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4216   AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}
4217 
4218   /// See AbstractAttribute::trackStatistics()
4219   void trackStatistics() const override {
4220     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4221   }
4222 };
4223 
4224 /// ------------------ Value Simplify Attribute ----------------------------
4225 struct AAValueSimplifyImpl : AAValueSimplify {
4226   AAValueSimplifyImpl(const IRPosition &IRP) : AAValueSimplify(IRP) {}
4227 
4228   /// See AbstractAttribute::initialize(...).
4229   void initialize(Attributor &A) override {
4230     if (getAssociatedValue().getType()->isVoidTy())
4231       indicatePessimisticFixpoint();
4232   }
4233 
4234   /// See AbstractAttribute::getAsStr().
4235   const std::string getAsStr() const override {
4236     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4237                         : "not-simple";
4238   }
4239 
4240   /// See AbstractAttribute::trackStatistics()
4241   void trackStatistics() const override {}
4242 
4243   /// See AAValueSimplify::getAssumedSimplifiedValue()
4244   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4245     if (!getAssumed())
4246       return const_cast<Value *>(&getAssociatedValue());
4247     return SimplifiedAssociatedValue;
4248   }
4249 
4250   /// Helper function for querying AAValueSimplify and updating candicate.
4251   /// \param QueryingValue Value trying to unify with SimplifiedValue
4252   /// \param AccumulatedSimplifiedValue Current simplification result.
4253   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4254                              Value &QueryingValue,
4255                              Optional<Value *> &AccumulatedSimplifiedValue) {
4256     // FIXME: Add a typecast support.
4257 
4258     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4259         QueryingAA, IRPosition::value(QueryingValue));
4260 
4261     Optional<Value *> QueryingValueSimplified =
4262         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4263 
4264     if (!QueryingValueSimplified.hasValue())
4265       return true;
4266 
4267     if (!QueryingValueSimplified.getValue())
4268       return false;
4269 
4270     Value &QueryingValueSimplifiedUnwrapped =
4271         *QueryingValueSimplified.getValue();
4272 
4273     if (AccumulatedSimplifiedValue.hasValue() &&
4274         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4275         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4276       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4277     if (AccumulatedSimplifiedValue.hasValue() &&
4278         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4279       return true;
4280 
4281     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4282                       << " is assumed to be "
4283                       << QueryingValueSimplifiedUnwrapped << "\n");
4284 
4285     AccumulatedSimplifiedValue = QueryingValueSimplified;
4286     return true;
4287   }
4288 
4289   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4290     if (!getAssociatedValue().getType()->isIntegerTy())
4291       return false;
4292 
4293     const auto &ValueConstantRangeAA =
4294         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4295 
4296     Optional<ConstantInt *> COpt =
4297         ValueConstantRangeAA.getAssumedConstantInt(A);
4298     if (COpt.hasValue()) {
4299       if (auto *C = COpt.getValue())
4300         SimplifiedAssociatedValue = C;
4301       else
4302         return false;
4303     } else {
4304       SimplifiedAssociatedValue = llvm::None;
4305     }
4306     return true;
4307   }
4308 
4309   /// See AbstractAttribute::manifest(...).
4310   ChangeStatus manifest(Attributor &A) override {
4311     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4312 
4313     if (SimplifiedAssociatedValue.hasValue() &&
4314         !SimplifiedAssociatedValue.getValue())
4315       return Changed;
4316 
4317     Value &V = getAssociatedValue();
4318     auto *C = SimplifiedAssociatedValue.hasValue()
4319                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4320                   : UndefValue::get(V.getType());
4321     if (C) {
4322       // We can replace the AssociatedValue with the constant.
4323       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4324         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4325                           << " :: " << *this << "\n");
4326         if (A.changeValueAfterManifest(V, *C))
4327           Changed = ChangeStatus::CHANGED;
4328       }
4329     }
4330 
4331     return Changed | AAValueSimplify::manifest(A);
4332   }
4333 
4334   /// See AbstractState::indicatePessimisticFixpoint(...).
4335   ChangeStatus indicatePessimisticFixpoint() override {
4336     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4337     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4338     SimplifiedAssociatedValue = &getAssociatedValue();
4339     indicateOptimisticFixpoint();
4340     return ChangeStatus::CHANGED;
4341   }
4342 
4343 protected:
4344   // An assumed simplified value. Initially, it is set to Optional::None, which
4345   // means that the value is not clear under current assumption. If in the
4346   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4347   // returns orignal associated value.
4348   Optional<Value *> SimplifiedAssociatedValue;
4349 };
4350 
4351 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4352   AAValueSimplifyArgument(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4353 
4354   void initialize(Attributor &A) override {
4355     AAValueSimplifyImpl::initialize(A);
4356     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4357       indicatePessimisticFixpoint();
4358     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4359                 /* IgnoreSubsumingPositions */ true))
4360       indicatePessimisticFixpoint();
4361 
4362     // FIXME: This is a hack to prevent us from propagating function poiner in
4363     // the new pass manager CGSCC pass as it creates call edges the
4364     // CallGraphUpdater cannot handle yet.
4365     Value &V = getAssociatedValue();
4366     if (V.getType()->isPointerTy() &&
4367         V.getType()->getPointerElementType()->isFunctionTy() &&
4368         !A.isModulePass())
4369       indicatePessimisticFixpoint();
4370   }
4371 
4372   /// See AbstractAttribute::updateImpl(...).
4373   ChangeStatus updateImpl(Attributor &A) override {
4374     // Byval is only replacable if it is readonly otherwise we would write into
4375     // the replaced value and not the copy that byval creates implicitly.
4376     Argument *Arg = getAssociatedArgument();
4377     if (Arg->hasByValAttr()) {
4378       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4379       //       there is no race by not copying a constant byval.
4380       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4381       if (!MemAA.isAssumedReadOnly())
4382         return indicatePessimisticFixpoint();
4383     }
4384 
4385     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4386 
4387     auto PredForCallSite = [&](AbstractCallSite ACS) {
4388       const IRPosition &ACSArgPos =
4389           IRPosition::callsite_argument(ACS, getArgNo());
4390       // Check if a coresponding argument was found or if it is on not
4391       // associated (which can happen for callback calls).
4392       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4393         return false;
4394 
4395       // We can only propagate thread independent values through callbacks.
4396       // This is different to direct/indirect call sites because for them we
4397       // know the thread executing the caller and callee is the same. For
4398       // callbacks this is not guaranteed, thus a thread dependent value could
4399       // be different for the caller and callee, making it invalid to propagate.
4400       Value &ArgOp = ACSArgPos.getAssociatedValue();
4401       if (ACS.isCallbackCall())
4402         if (auto *C = dyn_cast<Constant>(&ArgOp))
4403           if (C->isThreadDependent())
4404             return false;
4405       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4406     };
4407 
4408     bool AllCallSitesKnown;
4409     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4410                                 AllCallSitesKnown))
4411       if (!askSimplifiedValueForAAValueConstantRange(A))
4412         return indicatePessimisticFixpoint();
4413 
4414     // If a candicate was found in this update, return CHANGED.
4415     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4416                ? ChangeStatus::UNCHANGED
4417                : ChangeStatus ::CHANGED;
4418   }
4419 
4420   /// See AbstractAttribute::trackStatistics()
4421   void trackStatistics() const override {
4422     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4423   }
4424 };
4425 
4426 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4427   AAValueSimplifyReturned(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4428 
4429   /// See AbstractAttribute::updateImpl(...).
4430   ChangeStatus updateImpl(Attributor &A) override {
4431     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4432 
4433     auto PredForReturned = [&](Value &V) {
4434       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4435     };
4436 
4437     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4438       if (!askSimplifiedValueForAAValueConstantRange(A))
4439         return indicatePessimisticFixpoint();
4440 
4441     // If a candicate was found in this update, return CHANGED.
4442     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4443                ? ChangeStatus::UNCHANGED
4444                : ChangeStatus ::CHANGED;
4445   }
4446 
4447   ChangeStatus manifest(Attributor &A) override {
4448     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4449 
4450     if (SimplifiedAssociatedValue.hasValue() &&
4451         !SimplifiedAssociatedValue.getValue())
4452       return Changed;
4453 
4454     Value &V = getAssociatedValue();
4455     auto *C = SimplifiedAssociatedValue.hasValue()
4456                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4457                   : UndefValue::get(V.getType());
4458     if (C) {
4459       auto PredForReturned =
4460           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4461             // We can replace the AssociatedValue with the constant.
4462             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4463               return true;
4464 
4465             for (ReturnInst *RI : RetInsts) {
4466               if (RI->getFunction() != getAnchorScope())
4467                 continue;
4468               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4469                                 << " in " << *RI << " :: " << *this << "\n");
4470               if (A.changeUseAfterManifest(RI->getOperandUse(0), *C))
4471                 Changed = ChangeStatus::CHANGED;
4472             }
4473             return true;
4474           };
4475       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4476     }
4477 
4478     return Changed | AAValueSimplify::manifest(A);
4479   }
4480 
4481   /// See AbstractAttribute::trackStatistics()
4482   void trackStatistics() const override {
4483     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4484   }
4485 };
4486 
4487 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4488   AAValueSimplifyFloating(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4489 
4490   /// See AbstractAttribute::initialize(...).
4491   void initialize(Attributor &A) override {
4492     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4493     //        Needs investigation.
4494     // AAValueSimplifyImpl::initialize(A);
4495     Value &V = getAnchorValue();
4496 
4497     // TODO: add other stuffs
4498     if (isa<Constant>(V))
4499       indicatePessimisticFixpoint();
4500   }
4501 
4502   /// See AbstractAttribute::updateImpl(...).
4503   ChangeStatus updateImpl(Attributor &A) override {
4504     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4505 
4506     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4507                             bool Stripped) -> bool {
4508       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4509       if (!Stripped && this == &AA) {
4510         // TODO: Look the instruction and check recursively.
4511 
4512         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4513                           << "\n");
4514         return false;
4515       }
4516       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4517     };
4518 
4519     bool Dummy = false;
4520     if (!genericValueTraversal<AAValueSimplify, bool>(
4521             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI()))
4522       if (!askSimplifiedValueForAAValueConstantRange(A))
4523         return indicatePessimisticFixpoint();
4524 
4525     // If a candicate was found in this update, return CHANGED.
4526 
4527     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4528                ? ChangeStatus::UNCHANGED
4529                : ChangeStatus ::CHANGED;
4530   }
4531 
4532   /// See AbstractAttribute::trackStatistics()
4533   void trackStatistics() const override {
4534     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4535   }
4536 };
4537 
4538 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4539   AAValueSimplifyFunction(const IRPosition &IRP) : AAValueSimplifyImpl(IRP) {}
4540 
4541   /// See AbstractAttribute::initialize(...).
4542   void initialize(Attributor &A) override {
4543     SimplifiedAssociatedValue = &getAnchorValue();
4544     indicateOptimisticFixpoint();
4545   }
4546   /// See AbstractAttribute::initialize(...).
4547   ChangeStatus updateImpl(Attributor &A) override {
4548     llvm_unreachable(
4549         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4550   }
4551   /// See AbstractAttribute::trackStatistics()
4552   void trackStatistics() const override {
4553     STATS_DECLTRACK_FN_ATTR(value_simplify)
4554   }
4555 };
4556 
4557 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4558   AAValueSimplifyCallSite(const IRPosition &IRP)
4559       : AAValueSimplifyFunction(IRP) {}
4560   /// See AbstractAttribute::trackStatistics()
4561   void trackStatistics() const override {
4562     STATS_DECLTRACK_CS_ATTR(value_simplify)
4563   }
4564 };
4565 
4566 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4567   AAValueSimplifyCallSiteReturned(const IRPosition &IRP)
4568       : AAValueSimplifyReturned(IRP) {}
4569 
4570   /// See AbstractAttribute::manifest(...).
4571   ChangeStatus manifest(Attributor &A) override {
4572     return AAValueSimplifyImpl::manifest(A);
4573   }
4574 
4575   void trackStatistics() const override {
4576     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4577   }
4578 };
4579 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4580   AAValueSimplifyCallSiteArgument(const IRPosition &IRP)
4581       : AAValueSimplifyFloating(IRP) {}
4582 
4583   void trackStatistics() const override {
4584     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4585   }
4586 };
4587 
4588 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4589 struct AAHeapToStackImpl : public AAHeapToStack {
4590   AAHeapToStackImpl(const IRPosition &IRP) : AAHeapToStack(IRP) {}
4591 
4592   const std::string getAsStr() const override {
4593     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4594   }
4595 
4596   ChangeStatus manifest(Attributor &A) override {
4597     assert(getState().isValidState() &&
4598            "Attempted to manifest an invalid state!");
4599 
4600     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4601     Function *F = getAnchorScope();
4602     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4603 
4604     for (Instruction *MallocCall : MallocCalls) {
4605       // This malloc cannot be replaced.
4606       if (BadMallocCalls.count(MallocCall))
4607         continue;
4608 
4609       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4610         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4611         A.deleteAfterManifest(*FreeCall);
4612         HasChanged = ChangeStatus::CHANGED;
4613       }
4614 
4615       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4616                         << "\n");
4617 
4618       MaybeAlign Alignment;
4619       Constant *Size;
4620       if (isCallocLikeFn(MallocCall, TLI)) {
4621         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4622         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4623         APInt TotalSize = SizeT->getValue() * Num->getValue();
4624         Size =
4625             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4626       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4627         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4628         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4629                                    ->getValue()
4630                                    .getZExtValue());
4631       } else {
4632         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4633       }
4634 
4635       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4636       Instruction *AI =
4637           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4638                          "", MallocCall->getNextNode());
4639 
4640       if (AI->getType() != MallocCall->getType())
4641         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4642                              AI->getNextNode());
4643 
4644       A.changeValueAfterManifest(*MallocCall, *AI);
4645 
4646       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4647         auto *NBB = II->getNormalDest();
4648         BranchInst::Create(NBB, MallocCall->getParent());
4649         A.deleteAfterManifest(*MallocCall);
4650       } else {
4651         A.deleteAfterManifest(*MallocCall);
4652       }
4653 
4654       // Zero out the allocated memory if it was a calloc.
4655       if (isCallocLikeFn(MallocCall, TLI)) {
4656         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4657                                    AI->getNextNode());
4658         Value *Ops[] = {
4659             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4660             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4661 
4662         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4663         Module *M = F->getParent();
4664         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4665         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4666       }
4667       HasChanged = ChangeStatus::CHANGED;
4668     }
4669 
4670     return HasChanged;
4671   }
4672 
4673   /// Collection of all malloc calls in a function.
4674   SmallSetVector<Instruction *, 4> MallocCalls;
4675 
4676   /// Collection of malloc calls that cannot be converted.
4677   DenseSet<const Instruction *> BadMallocCalls;
4678 
4679   /// A map for each malloc call to the set of associated free calls.
4680   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4681 
4682   ChangeStatus updateImpl(Attributor &A) override;
4683 };
4684 
4685 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4686   const Function *F = getAnchorScope();
4687   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4688 
4689   MustBeExecutedContextExplorer &Explorer =
4690       A.getInfoCache().getMustBeExecutedContextExplorer();
4691 
4692   auto FreeCheck = [&](Instruction &I) {
4693     const auto &Frees = FreesForMalloc.lookup(&I);
4694     if (Frees.size() != 1)
4695       return false;
4696     Instruction *UniqueFree = *Frees.begin();
4697     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4698   };
4699 
4700   auto UsesCheck = [&](Instruction &I) {
4701     bool ValidUsesOnly = true;
4702     bool MustUse = true;
4703     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4704       Instruction *UserI = cast<Instruction>(U.getUser());
4705       if (isa<LoadInst>(UserI))
4706         return true;
4707       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4708         if (SI->getValueOperand() == U.get()) {
4709           LLVM_DEBUG(dbgs()
4710                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4711           ValidUsesOnly = false;
4712         } else {
4713           // A store into the malloc'ed memory is fine.
4714         }
4715         return true;
4716       }
4717       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4718         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4719           return true;
4720         // Record malloc.
4721         if (isFreeCall(UserI, TLI)) {
4722           if (MustUse) {
4723             FreesForMalloc[&I].insert(UserI);
4724           } else {
4725             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4726                               << *UserI << "\n");
4727             ValidUsesOnly = false;
4728           }
4729           return true;
4730         }
4731 
4732         unsigned ArgNo = CB->getArgOperandNo(&U);
4733 
4734         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4735             *this, IRPosition::callsite_argument(*CB, ArgNo));
4736 
4737         // If a callsite argument use is nofree, we are fine.
4738         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4739             *this, IRPosition::callsite_argument(*CB, ArgNo));
4740 
4741         if (!NoCaptureAA.isAssumedNoCapture() ||
4742             !ArgNoFreeAA.isAssumedNoFree()) {
4743           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4744           ValidUsesOnly = false;
4745         }
4746         return true;
4747       }
4748 
4749       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4750           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4751         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4752         Follow = true;
4753         return true;
4754       }
4755       // Unknown user for which we can not track uses further (in a way that
4756       // makes sense).
4757       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4758       ValidUsesOnly = false;
4759       return true;
4760     };
4761     A.checkForAllUses(Pred, *this, I);
4762     return ValidUsesOnly;
4763   };
4764 
4765   auto MallocCallocCheck = [&](Instruction &I) {
4766     if (BadMallocCalls.count(&I))
4767       return true;
4768 
4769     bool IsMalloc = isMallocLikeFn(&I, TLI);
4770     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
4771     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4772     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
4773       BadMallocCalls.insert(&I);
4774       return true;
4775     }
4776 
4777     if (IsMalloc) {
4778       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4779         if (Size->getValue().ule(MaxHeapToStackSize))
4780           if (UsesCheck(I) || FreeCheck(I)) {
4781             MallocCalls.insert(&I);
4782             return true;
4783           }
4784     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
4785       // Only if the alignment and sizes are constant.
4786       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4787         if (Size->getValue().ule(MaxHeapToStackSize))
4788           if (UsesCheck(I) || FreeCheck(I)) {
4789             MallocCalls.insert(&I);
4790             return true;
4791           }
4792     } else if (IsCalloc) {
4793       bool Overflow = false;
4794       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4795         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4796           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4797                   .ule(MaxHeapToStackSize))
4798             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4799               MallocCalls.insert(&I);
4800               return true;
4801             }
4802     }
4803 
4804     BadMallocCalls.insert(&I);
4805     return true;
4806   };
4807 
4808   size_t NumBadMallocs = BadMallocCalls.size();
4809 
4810   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4811 
4812   if (NumBadMallocs != BadMallocCalls.size())
4813     return ChangeStatus::CHANGED;
4814 
4815   return ChangeStatus::UNCHANGED;
4816 }
4817 
4818 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4819   AAHeapToStackFunction(const IRPosition &IRP) : AAHeapToStackImpl(IRP) {}
4820 
4821   /// See AbstractAttribute::trackStatistics().
4822   void trackStatistics() const override {
4823     STATS_DECL(
4824         MallocCalls, Function,
4825         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
4826     for (auto *C : MallocCalls)
4827       if (!BadMallocCalls.count(C))
4828         ++BUILD_STAT_NAME(MallocCalls, Function);
4829   }
4830 };
4831 
4832 /// ----------------------- Privatizable Pointers ------------------------------
4833 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
4834   AAPrivatizablePtrImpl(const IRPosition &IRP)
4835       : AAPrivatizablePtr(IRP), PrivatizableType(llvm::None) {}
4836 
4837   ChangeStatus indicatePessimisticFixpoint() override {
4838     AAPrivatizablePtr::indicatePessimisticFixpoint();
4839     PrivatizableType = nullptr;
4840     return ChangeStatus::CHANGED;
4841   }
4842 
4843   /// Identify the type we can chose for a private copy of the underlying
4844   /// argument. None means it is not clear yet, nullptr means there is none.
4845   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
4846 
4847   /// Return a privatizable type that encloses both T0 and T1.
4848   /// TODO: This is merely a stub for now as we should manage a mapping as well.
4849   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
4850     if (!T0.hasValue())
4851       return T1;
4852     if (!T1.hasValue())
4853       return T0;
4854     if (T0 == T1)
4855       return T0;
4856     return nullptr;
4857   }
4858 
4859   Optional<Type *> getPrivatizableType() const override {
4860     return PrivatizableType;
4861   }
4862 
4863   const std::string getAsStr() const override {
4864     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
4865   }
4866 
4867 protected:
4868   Optional<Type *> PrivatizableType;
4869 };
4870 
4871 // TODO: Do this for call site arguments (probably also other values) as well.
4872 
4873 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
4874   AAPrivatizablePtrArgument(const IRPosition &IRP)
4875       : AAPrivatizablePtrImpl(IRP) {}
4876 
4877   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
4878   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
4879     // If this is a byval argument and we know all the call sites (so we can
4880     // rewrite them), there is no need to check them explicitly.
4881     bool AllCallSitesKnown;
4882     if (getIRPosition().hasAttr(Attribute::ByVal) &&
4883         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
4884                                true, AllCallSitesKnown))
4885       return getAssociatedValue().getType()->getPointerElementType();
4886 
4887     Optional<Type *> Ty;
4888     unsigned ArgNo = getIRPosition().getArgNo();
4889 
4890     // Make sure the associated call site argument has the same type at all call
4891     // sites and it is an allocation we know is safe to privatize, for now that
4892     // means we only allow alloca instructions.
4893     // TODO: We can additionally analyze the accesses in the callee to  create
4894     //       the type from that information instead. That is a little more
4895     //       involved and will be done in a follow up patch.
4896     auto CallSiteCheck = [&](AbstractCallSite ACS) {
4897       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
4898       // Check if a coresponding argument was found or if it is one not
4899       // associated (which can happen for callback calls).
4900       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4901         return false;
4902 
4903       // Check that all call sites agree on a type.
4904       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
4905       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
4906 
4907       LLVM_DEBUG({
4908         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
4909         if (CSTy.hasValue() && CSTy.getValue())
4910           CSTy.getValue()->print(dbgs());
4911         else if (CSTy.hasValue())
4912           dbgs() << "<nullptr>";
4913         else
4914           dbgs() << "<none>";
4915       });
4916 
4917       Ty = combineTypes(Ty, CSTy);
4918 
4919       LLVM_DEBUG({
4920         dbgs() << " : New Type: ";
4921         if (Ty.hasValue() && Ty.getValue())
4922           Ty.getValue()->print(dbgs());
4923         else if (Ty.hasValue())
4924           dbgs() << "<nullptr>";
4925         else
4926           dbgs() << "<none>";
4927         dbgs() << "\n";
4928       });
4929 
4930       return !Ty.hasValue() || Ty.getValue();
4931     };
4932 
4933     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
4934       return nullptr;
4935     return Ty;
4936   }
4937 
4938   /// See AbstractAttribute::updateImpl(...).
4939   ChangeStatus updateImpl(Attributor &A) override {
4940     PrivatizableType = identifyPrivatizableType(A);
4941     if (!PrivatizableType.hasValue())
4942       return ChangeStatus::UNCHANGED;
4943     if (!PrivatizableType.getValue())
4944       return indicatePessimisticFixpoint();
4945 
4946     // Avoid arguments with padding for now.
4947     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
4948         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
4949                                                 A.getInfoCache().getDL())) {
4950       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
4951       return indicatePessimisticFixpoint();
4952     }
4953 
4954     // Verify callee and caller agree on how the promoted argument would be
4955     // passed.
4956     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
4957     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
4958     // which doesn't require the arguments ArgumentPromotion wanted to pass.
4959     Function &Fn = *getIRPosition().getAnchorScope();
4960     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
4961     ArgsToPromote.insert(getAssociatedArgument());
4962     const auto *TTI =
4963         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
4964     if (!TTI ||
4965         !ArgumentPromotionPass::areFunctionArgsABICompatible(
4966             Fn, *TTI, ArgsToPromote, Dummy) ||
4967         ArgsToPromote.empty()) {
4968       LLVM_DEBUG(
4969           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
4970                  << Fn.getName() << "\n");
4971       return indicatePessimisticFixpoint();
4972     }
4973 
4974     // Collect the types that will replace the privatizable type in the function
4975     // signature.
4976     SmallVector<Type *, 16> ReplacementTypes;
4977     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
4978 
4979     // Register a rewrite of the argument.
4980     Argument *Arg = getAssociatedArgument();
4981     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
4982       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
4983       return indicatePessimisticFixpoint();
4984     }
4985 
4986     unsigned ArgNo = Arg->getArgNo();
4987 
4988     // Helper to check if for the given call site the associated argument is
4989     // passed to a callback where the privatization would be different.
4990     auto IsCompatiblePrivArgOfCallback = [&](CallSite CS) {
4991       SmallVector<const Use *, 4> CBUses;
4992       AbstractCallSite::getCallbackUses(CS, CBUses);
4993       for (const Use *U : CBUses) {
4994         AbstractCallSite CBACS(U);
4995         assert(CBACS && CBACS.isCallbackCall());
4996         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
4997           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
4998 
4999           LLVM_DEBUG({
5000             dbgs()
5001                 << "[AAPrivatizablePtr] Argument " << *Arg
5002                 << "check if can be privatized in the context of its parent ("
5003                 << Arg->getParent()->getName()
5004                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5005                    "callback ("
5006                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5007                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5008                 << CBACS.getCallArgOperand(CBArg) << " vs "
5009                 << CS.getArgOperand(ArgNo) << "\n"
5010                 << "[AAPrivatizablePtr] " << CBArg << " : "
5011                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5012           });
5013 
5014           if (CBArgNo != int(ArgNo))
5015             continue;
5016           const auto &CBArgPrivAA =
5017               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5018           if (CBArgPrivAA.isValidState()) {
5019             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5020             if (!CBArgPrivTy.hasValue())
5021               continue;
5022             if (CBArgPrivTy.getValue() == PrivatizableType)
5023               continue;
5024           }
5025 
5026           LLVM_DEBUG({
5027             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5028                    << " cannot be privatized in the context of its parent ("
5029                    << Arg->getParent()->getName()
5030                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5031                       "callback ("
5032                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5033                    << ").\n[AAPrivatizablePtr] for which the argument "
5034                       "privatization is not compatible.\n";
5035           });
5036           return false;
5037         }
5038       }
5039       return true;
5040     };
5041 
5042     // Helper to check if for the given call site the associated argument is
5043     // passed to a direct call where the privatization would be different.
5044     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5045       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5046       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5047       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5048              "Expected a direct call operand for callback call operand");
5049 
5050       LLVM_DEBUG({
5051         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5052                << " check if be privatized in the context of its parent ("
5053                << Arg->getParent()->getName()
5054                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5055                   "direct call of ("
5056                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5057                << ").\n";
5058       });
5059 
5060       Function *DCCallee = DC->getCalledFunction();
5061       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5062         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5063             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5064         if (DCArgPrivAA.isValidState()) {
5065           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5066           if (!DCArgPrivTy.hasValue())
5067             return true;
5068           if (DCArgPrivTy.getValue() == PrivatizableType)
5069             return true;
5070         }
5071       }
5072 
5073       LLVM_DEBUG({
5074         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5075                << " cannot be privatized in the context of its parent ("
5076                << Arg->getParent()->getName()
5077                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5078                   "direct call of ("
5079                << ACS.getCallSite().getCalledFunction()->getName()
5080                << ").\n[AAPrivatizablePtr] for which the argument "
5081                   "privatization is not compatible.\n";
5082       });
5083       return false;
5084     };
5085 
5086     // Helper to check if the associated argument is used at the given abstract
5087     // call site in a way that is incompatible with the privatization assumed
5088     // here.
5089     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5090       if (ACS.isDirectCall())
5091         return IsCompatiblePrivArgOfCallback(ACS.getCallSite());
5092       if (ACS.isCallbackCall())
5093         return IsCompatiblePrivArgOfDirectCS(ACS);
5094       return false;
5095     };
5096 
5097     bool AllCallSitesKnown;
5098     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5099                                 AllCallSitesKnown))
5100       return indicatePessimisticFixpoint();
5101 
5102     return ChangeStatus::UNCHANGED;
5103   }
5104 
5105   /// Given a type to private \p PrivType, collect the constituates (which are
5106   /// used) in \p ReplacementTypes.
5107   static void
5108   identifyReplacementTypes(Type *PrivType,
5109                            SmallVectorImpl<Type *> &ReplacementTypes) {
5110     // TODO: For now we expand the privatization type to the fullest which can
5111     //       lead to dead arguments that need to be removed later.
5112     assert(PrivType && "Expected privatizable type!");
5113 
5114     // Traverse the type, extract constituate types on the outermost level.
5115     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5116       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5117         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5118     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5119       ReplacementTypes.append(PrivArrayType->getNumElements(),
5120                               PrivArrayType->getElementType());
5121     } else {
5122       ReplacementTypes.push_back(PrivType);
5123     }
5124   }
5125 
5126   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5127   /// The values needed are taken from the arguments of \p F starting at
5128   /// position \p ArgNo.
5129   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5130                                    unsigned ArgNo, Instruction &IP) {
5131     assert(PrivType && "Expected privatizable type!");
5132 
5133     IRBuilder<NoFolder> IRB(&IP);
5134     const DataLayout &DL = F.getParent()->getDataLayout();
5135 
5136     // Traverse the type, build GEPs and stores.
5137     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5138       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5139       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5140         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5141         Value *Ptr = constructPointer(
5142             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5143         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5144       }
5145     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5146       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5147       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5148       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5149         Value *Ptr =
5150             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5151         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5152       }
5153     } else {
5154       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5155     }
5156   }
5157 
5158   /// Extract values from \p Base according to the type \p PrivType at the
5159   /// call position \p ACS. The values are appended to \p ReplacementValues.
5160   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5161                                Value *Base,
5162                                SmallVectorImpl<Value *> &ReplacementValues) {
5163     assert(Base && "Expected base value!");
5164     assert(PrivType && "Expected privatizable type!");
5165     Instruction *IP = ACS.getInstruction();
5166 
5167     IRBuilder<NoFolder> IRB(IP);
5168     const DataLayout &DL = IP->getModule()->getDataLayout();
5169 
5170     if (Base->getType()->getPointerElementType() != PrivType)
5171       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5172                                                  "", ACS.getInstruction());
5173 
5174     // TODO: Improve the alignment of the loads.
5175     // Traverse the type, build GEPs and loads.
5176     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5177       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5178       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5179         Type *PointeeTy = PrivStructType->getElementType(u);
5180         Value *Ptr =
5181             constructPointer(PointeeTy->getPointerTo(), Base,
5182                              PrivStructLayout->getElementOffset(u), IRB, DL);
5183         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5184         L->setAlignment(Align(1));
5185         ReplacementValues.push_back(L);
5186       }
5187     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5188       Type *PointeeTy = PrivArrayType->getElementType();
5189       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5190       Type *PointeePtrTy = PointeeTy->getPointerTo();
5191       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5192         Value *Ptr =
5193             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5194         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5195         L->setAlignment(Align(1));
5196         ReplacementValues.push_back(L);
5197       }
5198     } else {
5199       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5200       L->setAlignment(Align(1));
5201       ReplacementValues.push_back(L);
5202     }
5203   }
5204 
5205   /// See AbstractAttribute::manifest(...)
5206   ChangeStatus manifest(Attributor &A) override {
5207     if (!PrivatizableType.hasValue())
5208       return ChangeStatus::UNCHANGED;
5209     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5210 
5211     // Collect all tail calls in the function as we cannot allow new allocas to
5212     // escape into tail recursion.
5213     // TODO: Be smarter about new allocas escaping into tail calls.
5214     SmallVector<CallInst *, 16> TailCalls;
5215     if (!A.checkForAllInstructions(
5216             [&](Instruction &I) {
5217               CallInst &CI = cast<CallInst>(I);
5218               if (CI.isTailCall())
5219                 TailCalls.push_back(&CI);
5220               return true;
5221             },
5222             *this, {Instruction::Call}))
5223       return ChangeStatus::UNCHANGED;
5224 
5225     Argument *Arg = getAssociatedArgument();
5226 
5227     // Callback to repair the associated function. A new alloca is placed at the
5228     // beginning and initialized with the values passed through arguments. The
5229     // new alloca replaces the use of the old pointer argument.
5230     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5231         [=](const Attributor::ArgumentReplacementInfo &ARI,
5232             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5233           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5234           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5235           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5236                                     Arg->getName() + ".priv", IP);
5237           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5238                                ArgIt->getArgNo(), *IP);
5239           Arg->replaceAllUsesWith(AI);
5240 
5241           for (CallInst *CI : TailCalls)
5242             CI->setTailCall(false);
5243         };
5244 
5245     // Callback to repair a call site of the associated function. The elements
5246     // of the privatizable type are loaded prior to the call and passed to the
5247     // new function version.
5248     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5249         [=](const Attributor::ArgumentReplacementInfo &ARI,
5250             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
5251           createReplacementValues(
5252               PrivatizableType.getValue(), ACS,
5253               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5254               NewArgOperands);
5255         };
5256 
5257     // Collect the types that will replace the privatizable type in the function
5258     // signature.
5259     SmallVector<Type *, 16> ReplacementTypes;
5260     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5261 
5262     // Register a rewrite of the argument.
5263     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5264                                            std::move(FnRepairCB),
5265                                            std::move(ACSRepairCB)))
5266       return ChangeStatus::CHANGED;
5267     return ChangeStatus::UNCHANGED;
5268   }
5269 
5270   /// See AbstractAttribute::trackStatistics()
5271   void trackStatistics() const override {
5272     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5273   }
5274 };
5275 
5276 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5277   AAPrivatizablePtrFloating(const IRPosition &IRP)
5278       : AAPrivatizablePtrImpl(IRP) {}
5279 
5280   /// See AbstractAttribute::initialize(...).
5281   virtual void initialize(Attributor &A) override {
5282     // TODO: We can privatize more than arguments.
5283     indicatePessimisticFixpoint();
5284   }
5285 
5286   ChangeStatus updateImpl(Attributor &A) override {
5287     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5288                      "updateImpl will not be called");
5289   }
5290 
5291   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5292   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5293     Value *Obj =
5294         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5295     if (!Obj) {
5296       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5297       return nullptr;
5298     }
5299 
5300     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5301       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5302         if (CI->isOne())
5303           return Obj->getType()->getPointerElementType();
5304     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5305       auto &PrivArgAA =
5306           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5307       if (PrivArgAA.isAssumedPrivatizablePtr())
5308         return Obj->getType()->getPointerElementType();
5309     }
5310 
5311     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5312                          "alloca nor privatizable argument: "
5313                       << *Obj << "!\n");
5314     return nullptr;
5315   }
5316 
5317   /// See AbstractAttribute::trackStatistics()
5318   void trackStatistics() const override {
5319     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5320   }
5321 };
5322 
5323 struct AAPrivatizablePtrCallSiteArgument final
5324     : public AAPrivatizablePtrFloating {
5325   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP)
5326       : AAPrivatizablePtrFloating(IRP) {}
5327 
5328   /// See AbstractAttribute::initialize(...).
5329   void initialize(Attributor &A) override {
5330     if (getIRPosition().hasAttr(Attribute::ByVal))
5331       indicateOptimisticFixpoint();
5332   }
5333 
5334   /// See AbstractAttribute::updateImpl(...).
5335   ChangeStatus updateImpl(Attributor &A) override {
5336     PrivatizableType = identifyPrivatizableType(A);
5337     if (!PrivatizableType.hasValue())
5338       return ChangeStatus::UNCHANGED;
5339     if (!PrivatizableType.getValue())
5340       return indicatePessimisticFixpoint();
5341 
5342     const IRPosition &IRP = getIRPosition();
5343     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5344     if (!NoCaptureAA.isAssumedNoCapture()) {
5345       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5346       return indicatePessimisticFixpoint();
5347     }
5348 
5349     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5350     if (!NoAliasAA.isAssumedNoAlias()) {
5351       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5352       return indicatePessimisticFixpoint();
5353     }
5354 
5355     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5356     if (!MemBehaviorAA.isAssumedReadOnly()) {
5357       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5358       return indicatePessimisticFixpoint();
5359     }
5360 
5361     return ChangeStatus::UNCHANGED;
5362   }
5363 
5364   /// See AbstractAttribute::trackStatistics()
5365   void trackStatistics() const override {
5366     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5367   }
5368 };
5369 
5370 struct AAPrivatizablePtrCallSiteReturned final
5371     : public AAPrivatizablePtrFloating {
5372   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP)
5373       : AAPrivatizablePtrFloating(IRP) {}
5374 
5375   /// See AbstractAttribute::initialize(...).
5376   void initialize(Attributor &A) override {
5377     // TODO: We can privatize more than arguments.
5378     indicatePessimisticFixpoint();
5379   }
5380 
5381   /// See AbstractAttribute::trackStatistics()
5382   void trackStatistics() const override {
5383     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5384   }
5385 };
5386 
5387 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5388   AAPrivatizablePtrReturned(const IRPosition &IRP)
5389       : AAPrivatizablePtrFloating(IRP) {}
5390 
5391   /// See AbstractAttribute::initialize(...).
5392   void initialize(Attributor &A) override {
5393     // TODO: We can privatize more than arguments.
5394     indicatePessimisticFixpoint();
5395   }
5396 
5397   /// See AbstractAttribute::trackStatistics()
5398   void trackStatistics() const override {
5399     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5400   }
5401 };
5402 
5403 /// -------------------- Memory Behavior Attributes ----------------------------
5404 /// Includes read-none, read-only, and write-only.
5405 /// ----------------------------------------------------------------------------
5406 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5407   AAMemoryBehaviorImpl(const IRPosition &IRP) : AAMemoryBehavior(IRP) {}
5408 
5409   /// See AbstractAttribute::initialize(...).
5410   void initialize(Attributor &A) override {
5411     intersectAssumedBits(BEST_STATE);
5412     getKnownStateFromValue(getIRPosition(), getState());
5413     IRAttribute::initialize(A);
5414   }
5415 
5416   /// Return the memory behavior information encoded in the IR for \p IRP.
5417   static void getKnownStateFromValue(const IRPosition &IRP,
5418                                      BitIntegerState &State,
5419                                      bool IgnoreSubsumingPositions = false) {
5420     SmallVector<Attribute, 2> Attrs;
5421     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5422     for (const Attribute &Attr : Attrs) {
5423       switch (Attr.getKindAsEnum()) {
5424       case Attribute::ReadNone:
5425         State.addKnownBits(NO_ACCESSES);
5426         break;
5427       case Attribute::ReadOnly:
5428         State.addKnownBits(NO_WRITES);
5429         break;
5430       case Attribute::WriteOnly:
5431         State.addKnownBits(NO_READS);
5432         break;
5433       default:
5434         llvm_unreachable("Unexpected attribute!");
5435       }
5436     }
5437 
5438     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5439       if (!I->mayReadFromMemory())
5440         State.addKnownBits(NO_READS);
5441       if (!I->mayWriteToMemory())
5442         State.addKnownBits(NO_WRITES);
5443     }
5444   }
5445 
5446   /// See AbstractAttribute::getDeducedAttributes(...).
5447   void getDeducedAttributes(LLVMContext &Ctx,
5448                             SmallVectorImpl<Attribute> &Attrs) const override {
5449     assert(Attrs.size() == 0);
5450     if (isAssumedReadNone())
5451       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5452     else if (isAssumedReadOnly())
5453       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5454     else if (isAssumedWriteOnly())
5455       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5456     assert(Attrs.size() <= 1);
5457   }
5458 
5459   /// See AbstractAttribute::manifest(...).
5460   ChangeStatus manifest(Attributor &A) override {
5461     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5462       return ChangeStatus::UNCHANGED;
5463 
5464     const IRPosition &IRP = getIRPosition();
5465 
5466     // Check if we would improve the existing attributes first.
5467     SmallVector<Attribute, 4> DeducedAttrs;
5468     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5469     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5470           return IRP.hasAttr(Attr.getKindAsEnum(),
5471                              /* IgnoreSubsumingPositions */ true);
5472         }))
5473       return ChangeStatus::UNCHANGED;
5474 
5475     // Clear existing attributes.
5476     IRP.removeAttrs(AttrKinds);
5477 
5478     // Use the generic manifest method.
5479     return IRAttribute::manifest(A);
5480   }
5481 
5482   /// See AbstractState::getAsStr().
5483   const std::string getAsStr() const override {
5484     if (isAssumedReadNone())
5485       return "readnone";
5486     if (isAssumedReadOnly())
5487       return "readonly";
5488     if (isAssumedWriteOnly())
5489       return "writeonly";
5490     return "may-read/write";
5491   }
5492 
5493   /// The set of IR attributes AAMemoryBehavior deals with.
5494   static const Attribute::AttrKind AttrKinds[3];
5495 };
5496 
5497 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5498     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5499 
5500 /// Memory behavior attribute for a floating value.
5501 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5502   AAMemoryBehaviorFloating(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5503 
5504   /// See AbstractAttribute::initialize(...).
5505   void initialize(Attributor &A) override {
5506     AAMemoryBehaviorImpl::initialize(A);
5507     // Initialize the use vector with all direct uses of the associated value.
5508     for (const Use &U : getAssociatedValue().uses())
5509       Uses.insert(&U);
5510   }
5511 
5512   /// See AbstractAttribute::updateImpl(...).
5513   ChangeStatus updateImpl(Attributor &A) override;
5514 
5515   /// See AbstractAttribute::trackStatistics()
5516   void trackStatistics() const override {
5517     if (isAssumedReadNone())
5518       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5519     else if (isAssumedReadOnly())
5520       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5521     else if (isAssumedWriteOnly())
5522       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5523   }
5524 
5525 private:
5526   /// Return true if users of \p UserI might access the underlying
5527   /// variable/location described by \p U and should therefore be analyzed.
5528   bool followUsersOfUseIn(Attributor &A, const Use *U,
5529                           const Instruction *UserI);
5530 
5531   /// Update the state according to the effect of use \p U in \p UserI.
5532   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5533 
5534 protected:
5535   /// Container for (transitive) uses of the associated argument.
5536   SetVector<const Use *> Uses;
5537 };
5538 
5539 /// Memory behavior attribute for function argument.
5540 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5541   AAMemoryBehaviorArgument(const IRPosition &IRP)
5542       : AAMemoryBehaviorFloating(IRP) {}
5543 
5544   /// See AbstractAttribute::initialize(...).
5545   void initialize(Attributor &A) override {
5546     intersectAssumedBits(BEST_STATE);
5547     const IRPosition &IRP = getIRPosition();
5548     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5549     // can query it when we use has/getAttr. That would allow us to reuse the
5550     // initialize of the base class here.
5551     bool HasByVal =
5552         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5553     getKnownStateFromValue(IRP, getState(),
5554                            /* IgnoreSubsumingPositions */ HasByVal);
5555 
5556     // Initialize the use vector with all direct uses of the associated value.
5557     Argument *Arg = getAssociatedArgument();
5558     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5559       indicatePessimisticFixpoint();
5560     } else {
5561       // Initialize the use vector with all direct uses of the associated value.
5562       for (const Use &U : Arg->uses())
5563         Uses.insert(&U);
5564     }
5565   }
5566 
5567   ChangeStatus manifest(Attributor &A) override {
5568     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5569     if (!getAssociatedValue().getType()->isPointerTy())
5570       return ChangeStatus::UNCHANGED;
5571 
5572     // TODO: From readattrs.ll: "inalloca parameters are always
5573     //                           considered written"
5574     if (hasAttr({Attribute::InAlloca})) {
5575       removeKnownBits(NO_WRITES);
5576       removeAssumedBits(NO_WRITES);
5577     }
5578     return AAMemoryBehaviorFloating::manifest(A);
5579   }
5580 
5581   /// See AbstractAttribute::trackStatistics()
5582   void trackStatistics() const override {
5583     if (isAssumedReadNone())
5584       STATS_DECLTRACK_ARG_ATTR(readnone)
5585     else if (isAssumedReadOnly())
5586       STATS_DECLTRACK_ARG_ATTR(readonly)
5587     else if (isAssumedWriteOnly())
5588       STATS_DECLTRACK_ARG_ATTR(writeonly)
5589   }
5590 };
5591 
5592 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5593   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP)
5594       : AAMemoryBehaviorArgument(IRP) {}
5595 
5596   /// See AbstractAttribute::initialize(...).
5597   void initialize(Attributor &A) override {
5598     if (Argument *Arg = getAssociatedArgument()) {
5599       if (Arg->hasByValAttr()) {
5600         addKnownBits(NO_WRITES);
5601         removeKnownBits(NO_READS);
5602         removeAssumedBits(NO_READS);
5603       }
5604     } else {
5605     }
5606     AAMemoryBehaviorArgument::initialize(A);
5607   }
5608 
5609   /// See AbstractAttribute::updateImpl(...).
5610   ChangeStatus updateImpl(Attributor &A) override {
5611     // TODO: Once we have call site specific value information we can provide
5612     //       call site specific liveness liveness information and then it makes
5613     //       sense to specialize attributes for call sites arguments instead of
5614     //       redirecting requests to the callee argument.
5615     Argument *Arg = getAssociatedArgument();
5616     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5617     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5618     return clampStateAndIndicateChange(
5619         getState(),
5620         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5621   }
5622 
5623   /// See AbstractAttribute::trackStatistics()
5624   void trackStatistics() const override {
5625     if (isAssumedReadNone())
5626       STATS_DECLTRACK_CSARG_ATTR(readnone)
5627     else if (isAssumedReadOnly())
5628       STATS_DECLTRACK_CSARG_ATTR(readonly)
5629     else if (isAssumedWriteOnly())
5630       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5631   }
5632 };
5633 
5634 /// Memory behavior attribute for a call site return position.
5635 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5636   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP)
5637       : AAMemoryBehaviorFloating(IRP) {}
5638 
5639   /// See AbstractAttribute::manifest(...).
5640   ChangeStatus manifest(Attributor &A) override {
5641     // We do not annotate returned values.
5642     return ChangeStatus::UNCHANGED;
5643   }
5644 
5645   /// See AbstractAttribute::trackStatistics()
5646   void trackStatistics() const override {}
5647 };
5648 
5649 /// An AA to represent the memory behavior function attributes.
5650 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5651   AAMemoryBehaviorFunction(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5652 
5653   /// See AbstractAttribute::updateImpl(Attributor &A).
5654   virtual ChangeStatus updateImpl(Attributor &A) override;
5655 
5656   /// See AbstractAttribute::manifest(...).
5657   ChangeStatus manifest(Attributor &A) override {
5658     Function &F = cast<Function>(getAnchorValue());
5659     if (isAssumedReadNone()) {
5660       F.removeFnAttr(Attribute::ArgMemOnly);
5661       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5662       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5663     }
5664     return AAMemoryBehaviorImpl::manifest(A);
5665   }
5666 
5667   /// See AbstractAttribute::trackStatistics()
5668   void trackStatistics() const override {
5669     if (isAssumedReadNone())
5670       STATS_DECLTRACK_FN_ATTR(readnone)
5671     else if (isAssumedReadOnly())
5672       STATS_DECLTRACK_FN_ATTR(readonly)
5673     else if (isAssumedWriteOnly())
5674       STATS_DECLTRACK_FN_ATTR(writeonly)
5675   }
5676 };
5677 
5678 /// AAMemoryBehavior attribute for call sites.
5679 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5680   AAMemoryBehaviorCallSite(const IRPosition &IRP) : AAMemoryBehaviorImpl(IRP) {}
5681 
5682   /// See AbstractAttribute::initialize(...).
5683   void initialize(Attributor &A) override {
5684     AAMemoryBehaviorImpl::initialize(A);
5685     Function *F = getAssociatedFunction();
5686     if (!F || !A.isFunctionIPOAmendable(*F))
5687       indicatePessimisticFixpoint();
5688   }
5689 
5690   /// See AbstractAttribute::updateImpl(...).
5691   ChangeStatus updateImpl(Attributor &A) override {
5692     // TODO: Once we have call site specific value information we can provide
5693     //       call site specific liveness liveness information and then it makes
5694     //       sense to specialize attributes for call sites arguments instead of
5695     //       redirecting requests to the callee argument.
5696     Function *F = getAssociatedFunction();
5697     const IRPosition &FnPos = IRPosition::function(*F);
5698     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5699     return clampStateAndIndicateChange(
5700         getState(),
5701         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5702   }
5703 
5704   /// See AbstractAttribute::trackStatistics()
5705   void trackStatistics() const override {
5706     if (isAssumedReadNone())
5707       STATS_DECLTRACK_CS_ATTR(readnone)
5708     else if (isAssumedReadOnly())
5709       STATS_DECLTRACK_CS_ATTR(readonly)
5710     else if (isAssumedWriteOnly())
5711       STATS_DECLTRACK_CS_ATTR(writeonly)
5712   }
5713 };
5714 
5715 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5716 
5717   // The current assumed state used to determine a change.
5718   auto AssumedState = getAssumed();
5719 
5720   auto CheckRWInst = [&](Instruction &I) {
5721     // If the instruction has an own memory behavior state, use it to restrict
5722     // the local state. No further analysis is required as the other memory
5723     // state is as optimistic as it gets.
5724     if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
5725       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5726           *this, IRPosition::callsite_function(ICS));
5727       intersectAssumedBits(MemBehaviorAA.getAssumed());
5728       return !isAtFixpoint();
5729     }
5730 
5731     // Remove access kind modifiers if necessary.
5732     if (I.mayReadFromMemory())
5733       removeAssumedBits(NO_READS);
5734     if (I.mayWriteToMemory())
5735       removeAssumedBits(NO_WRITES);
5736     return !isAtFixpoint();
5737   };
5738 
5739   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5740     return indicatePessimisticFixpoint();
5741 
5742   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5743                                         : ChangeStatus::UNCHANGED;
5744 }
5745 
5746 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5747 
5748   const IRPosition &IRP = getIRPosition();
5749   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5750   AAMemoryBehavior::StateType &S = getState();
5751 
5752   // First, check the function scope. We take the known information and we avoid
5753   // work if the assumed information implies the current assumed information for
5754   // this attribute. This is a valid for all but byval arguments.
5755   Argument *Arg = IRP.getAssociatedArgument();
5756   AAMemoryBehavior::base_t FnMemAssumedState =
5757       AAMemoryBehavior::StateType::getWorstState();
5758   if (!Arg || !Arg->hasByValAttr()) {
5759     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5760         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5761     FnMemAssumedState = FnMemAA.getAssumed();
5762     S.addKnownBits(FnMemAA.getKnown());
5763     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5764       return ChangeStatus::UNCHANGED;
5765   }
5766 
5767   // Make sure the value is not captured (except through "return"), if
5768   // it is, any information derived would be irrelevant anyway as we cannot
5769   // check the potential aliases introduced by the capture. However, no need
5770   // to fall back to anythign less optimistic than the function state.
5771   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5772       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5773   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5774     S.intersectAssumedBits(FnMemAssumedState);
5775     return ChangeStatus::CHANGED;
5776   }
5777 
5778   // The current assumed state used to determine a change.
5779   auto AssumedState = S.getAssumed();
5780 
5781   // Liveness information to exclude dead users.
5782   // TODO: Take the FnPos once we have call site specific liveness information.
5783   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5784       *this, IRPosition::function(*IRP.getAssociatedFunction()),
5785       /* TrackDependence */ false);
5786 
5787   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5788   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5789     const Use *U = Uses[i];
5790     Instruction *UserI = cast<Instruction>(U->getUser());
5791     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5792                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
5793                       << "]\n");
5794     if (A.isAssumedDead(*U, this, &LivenessAA))
5795       continue;
5796 
5797     // Droppable users, e.g., llvm::assume does not actually perform any action.
5798     if (UserI->isDroppable())
5799       continue;
5800 
5801     // Check if the users of UserI should also be visited.
5802     if (followUsersOfUseIn(A, U, UserI))
5803       for (const Use &UserIUse : UserI->uses())
5804         Uses.insert(&UserIUse);
5805 
5806     // If UserI might touch memory we analyze the use in detail.
5807     if (UserI->mayReadOrWriteMemory())
5808       analyzeUseIn(A, U, UserI);
5809   }
5810 
5811   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5812                                         : ChangeStatus::UNCHANGED;
5813 }
5814 
5815 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5816                                                   const Instruction *UserI) {
5817   // The loaded value is unrelated to the pointer argument, no need to
5818   // follow the users of the load.
5819   if (isa<LoadInst>(UserI))
5820     return false;
5821 
5822   // By default we follow all uses assuming UserI might leak information on U,
5823   // we have special handling for call sites operands though.
5824   ImmutableCallSite ICS(UserI);
5825   if (!ICS || !ICS.isArgOperand(U))
5826     return true;
5827 
5828   // If the use is a call argument known not to be captured, the users of
5829   // the call do not need to be visited because they have to be unrelated to
5830   // the input. Note that this check is not trivial even though we disallow
5831   // general capturing of the underlying argument. The reason is that the
5832   // call might the argument "through return", which we allow and for which we
5833   // need to check call users.
5834   if (U->get()->getType()->isPointerTy()) {
5835     unsigned ArgNo = ICS.getArgumentNo(U);
5836     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5837         *this, IRPosition::callsite_argument(ICS, ArgNo),
5838         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5839     return !ArgNoCaptureAA.isAssumedNoCapture();
5840   }
5841 
5842   return true;
5843 }
5844 
5845 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5846                                             const Instruction *UserI) {
5847   assert(UserI->mayReadOrWriteMemory());
5848 
5849   switch (UserI->getOpcode()) {
5850   default:
5851     // TODO: Handle all atomics and other side-effect operations we know of.
5852     break;
5853   case Instruction::Load:
5854     // Loads cause the NO_READS property to disappear.
5855     removeAssumedBits(NO_READS);
5856     return;
5857 
5858   case Instruction::Store:
5859     // Stores cause the NO_WRITES property to disappear if the use is the
5860     // pointer operand. Note that we do assume that capturing was taken care of
5861     // somewhere else.
5862     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5863       removeAssumedBits(NO_WRITES);
5864     return;
5865 
5866   case Instruction::Call:
5867   case Instruction::CallBr:
5868   case Instruction::Invoke: {
5869     // For call sites we look at the argument memory behavior attribute (this
5870     // could be recursive!) in order to restrict our own state.
5871     ImmutableCallSite ICS(UserI);
5872 
5873     // Give up on operand bundles.
5874     if (ICS.isBundleOperand(U)) {
5875       indicatePessimisticFixpoint();
5876       return;
5877     }
5878 
5879     // Calling a function does read the function pointer, maybe write it if the
5880     // function is self-modifying.
5881     if (ICS.isCallee(U)) {
5882       removeAssumedBits(NO_READS);
5883       break;
5884     }
5885 
5886     // Adjust the possible access behavior based on the information on the
5887     // argument.
5888     IRPosition Pos;
5889     if (U->get()->getType()->isPointerTy())
5890       Pos = IRPosition::callsite_argument(ICS, ICS.getArgumentNo(U));
5891     else
5892       Pos = IRPosition::callsite_function(ICS);
5893     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5894         *this, Pos,
5895         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5896     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5897     // and at least "known".
5898     intersectAssumedBits(MemBehaviorAA.getAssumed());
5899     return;
5900   }
5901   };
5902 
5903   // Generally, look at the "may-properties" and adjust the assumed state if we
5904   // did not trigger special handling before.
5905   if (UserI->mayReadFromMemory())
5906     removeAssumedBits(NO_READS);
5907   if (UserI->mayWriteToMemory())
5908     removeAssumedBits(NO_WRITES);
5909 }
5910 
5911 } // namespace
5912 
5913 /// -------------------- Memory Locations Attributes ---------------------------
5914 /// Includes read-none, argmemonly, inaccessiblememonly,
5915 /// inaccessiblememorargmemonly
5916 /// ----------------------------------------------------------------------------
5917 
5918 std::string AAMemoryLocation::getMemoryLocationsAsStr(
5919     AAMemoryLocation::MemoryLocationsKind MLK) {
5920   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
5921     return "all memory";
5922   if (MLK == AAMemoryLocation::NO_LOCATIONS)
5923     return "no memory";
5924   std::string S = "memory:";
5925   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
5926     S += "stack,";
5927   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
5928     S += "constant,";
5929   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
5930     S += "internal global,";
5931   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
5932     S += "external global,";
5933   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
5934     S += "argument,";
5935   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
5936     S += "inaccessible,";
5937   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
5938     S += "malloced,";
5939   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
5940     S += "unknown,";
5941   S.pop_back();
5942   return S;
5943 }
5944 
5945 struct AAMemoryLocationImpl : public AAMemoryLocation {
5946 
5947   AAMemoryLocationImpl(const IRPosition &IRP) : AAMemoryLocation(IRP) {}
5948 
5949   /// See AbstractAttribute::initialize(...).
5950   void initialize(Attributor &A) override {
5951     intersectAssumedBits(BEST_STATE);
5952     getKnownStateFromValue(getIRPosition(), getState());
5953     IRAttribute::initialize(A);
5954   }
5955 
5956   /// Return the memory behavior information encoded in the IR for \p IRP.
5957   static void getKnownStateFromValue(const IRPosition &IRP,
5958                                      BitIntegerState &State,
5959                                      bool IgnoreSubsumingPositions = false) {
5960     SmallVector<Attribute, 2> Attrs;
5961     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5962     for (const Attribute &Attr : Attrs) {
5963       switch (Attr.getKindAsEnum()) {
5964       case Attribute::ReadNone:
5965         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
5966         break;
5967       case Attribute::InaccessibleMemOnly:
5968         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
5969         break;
5970       case Attribute::ArgMemOnly:
5971         State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
5972         break;
5973       case Attribute::InaccessibleMemOrArgMemOnly:
5974         State.addKnownBits(
5975             inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
5976         break;
5977       default:
5978         llvm_unreachable("Unexpected attribute!");
5979       }
5980     }
5981   }
5982 
5983   /// See AbstractAttribute::getDeducedAttributes(...).
5984   void getDeducedAttributes(LLVMContext &Ctx,
5985                             SmallVectorImpl<Attribute> &Attrs) const override {
5986     assert(Attrs.size() == 0);
5987     if (isAssumedReadNone()) {
5988       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5989     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
5990       if (isAssumedInaccessibleMemOnly())
5991         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
5992       else if (isAssumedArgMemOnly())
5993         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
5994       else if (isAssumedInaccessibleOrArgMemOnly())
5995         Attrs.push_back(
5996             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
5997     }
5998     assert(Attrs.size() <= 1);
5999   }
6000 
6001   /// See AbstractAttribute::manifest(...).
6002   ChangeStatus manifest(Attributor &A) override {
6003     const IRPosition &IRP = getIRPosition();
6004 
6005     // Check if we would improve the existing attributes first.
6006     SmallVector<Attribute, 4> DeducedAttrs;
6007     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6008     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6009           return IRP.hasAttr(Attr.getKindAsEnum(),
6010                              /* IgnoreSubsumingPositions */ true);
6011         }))
6012       return ChangeStatus::UNCHANGED;
6013 
6014     // Clear existing attributes.
6015     IRP.removeAttrs(AttrKinds);
6016     if (isAssumedReadNone())
6017       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6018 
6019     // Use the generic manifest method.
6020     return IRAttribute::manifest(A);
6021   }
6022 
6023   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6024   bool checkForAllAccessesToMemoryKind(
6025       function_ref<bool(const Instruction *, const Value *, AccessKind,
6026                         MemoryLocationsKind)>
6027           Pred,
6028       MemoryLocationsKind RequestedMLK) const override {
6029     if (!isValidState())
6030       return false;
6031 
6032     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6033     if (AssumedMLK == NO_LOCATIONS)
6034       return true;
6035 
6036     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6037       if (CurMLK & RequestedMLK)
6038         continue;
6039 
6040       const auto &Accesses = AccessKindAccessesMap.lookup(CurMLK);
6041       for (const AccessInfo &AI : Accesses) {
6042         if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6043           return false;
6044       }
6045     }
6046 
6047     return true;
6048   }
6049 
6050   ChangeStatus indicatePessimisticFixpoint() override {
6051     // If we give up and indicate a pessimistic fixpoint this instruction will
6052     // become an access for all potential access kinds:
6053     // TODO: Add pointers for argmemonly and globals to improve the results of
6054     //       checkForAllAccessesToMemoryKind.
6055     bool Changed = false;
6056     MemoryLocationsKind KnownMLK = getKnown();
6057     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6058     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6059       if (!(CurMLK & KnownMLK))
6060         updateStateAndAccessesMap(getState(), AccessKindAccessesMap, CurMLK, I,
6061                                   nullptr, Changed);
6062     return AAMemoryLocation::indicatePessimisticFixpoint();
6063   }
6064 
6065 protected:
6066   /// Helper struct to tie together an instruction that has a read or write
6067   /// effect with the pointer it accesses (if any).
6068   struct AccessInfo {
6069 
6070     /// The instruction that caused the access.
6071     const Instruction *I;
6072 
6073     /// The base pointer that is accessed, or null if unknown.
6074     const Value *Ptr;
6075 
6076     /// The kind of access (read/write/read+write).
6077     AccessKind Kind;
6078 
6079     bool operator==(const AccessInfo &RHS) const {
6080       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6081     }
6082     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6083       if (LHS.I != RHS.I)
6084         return LHS.I < RHS.I;
6085       if (LHS.Ptr != RHS.Ptr)
6086         return LHS.Ptr < RHS.Ptr;
6087       if (LHS.Kind != RHS.Kind)
6088         return LHS.Kind < RHS.Kind;
6089       return false;
6090     }
6091   };
6092 
6093   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6094   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6095   using AccessKindAccessesMapTy =
6096       DenseMap<unsigned, SmallSet<AccessInfo, 8, AccessInfo>>;
6097   AccessKindAccessesMapTy AccessKindAccessesMap;
6098 
6099   /// Return the kind(s) of location that may be accessed by \p V.
6100   AAMemoryLocation::MemoryLocationsKind
6101   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6102 
6103   /// Update the state \p State and the AccessKindAccessesMap given that \p I is
6104   /// an access to a \p MLK memory location with the access pointer \p Ptr.
6105   static void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6106                                         AccessKindAccessesMapTy &AccessMap,
6107                                         MemoryLocationsKind MLK,
6108                                         const Instruction *I, const Value *Ptr,
6109                                         bool &Changed) {
6110     // TODO: The kind should be determined at the call sites based on the
6111     // information we have there.
6112     AccessKind Kind = READ_WRITE;
6113     if (I) {
6114       Kind = I->mayReadFromMemory() ? READ : NONE;
6115       Kind = AccessKind(Kind | (I->mayWriteToMemory() ? WRITE : NONE));
6116     }
6117 
6118     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6119     Changed |= AccessMap[MLK].insert(AccessInfo{I, Ptr, Kind}).second;
6120     State.removeAssumedBits(MLK);
6121   }
6122 
6123   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6124   /// arguments, and update the state and access map accordingly.
6125   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6126                           AAMemoryLocation::StateType &State, bool &Changed);
6127 
6128   /// The set of IR attributes AAMemoryLocation deals with.
6129   static const Attribute::AttrKind AttrKinds[4];
6130 };
6131 
6132 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6133     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6134     Attribute::InaccessibleMemOrArgMemOnly};
6135 
6136 void AAMemoryLocationImpl::categorizePtrValue(
6137     Attributor &A, const Instruction &I, const Value &Ptr,
6138     AAMemoryLocation::StateType &State, bool &Changed) {
6139   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6140                     << Ptr << " ["
6141                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6142 
6143   auto StripGEPCB = [](Value *V) -> Value * {
6144     auto *GEP = dyn_cast<GEPOperator>(V);
6145     while (GEP) {
6146       V = GEP->getPointerOperand();
6147       GEP = dyn_cast<GEPOperator>(V);
6148     }
6149     return V;
6150   };
6151 
6152   auto VisitValueCB = [&](Value &V, const Instruction *,
6153                           AAMemoryLocation::StateType &T,
6154                           bool Stripped) -> bool {
6155     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6156     if (isa<UndefValue>(V))
6157       return true;
6158     if (auto *Arg = dyn_cast<Argument>(&V)) {
6159       if (Arg->hasByValAttr())
6160         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_LOCAL_MEM, &I,
6161                                   &V, Changed);
6162       else
6163         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_ARGUMENT_MEM, &I,
6164                                   &V, Changed);
6165       return true;
6166     }
6167     if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6168       if (GV->hasLocalLinkage())
6169         updateStateAndAccessesMap(T, AccessKindAccessesMap,
6170                                   NO_GLOBAL_INTERNAL_MEM, &I, &V, Changed);
6171       else
6172         updateStateAndAccessesMap(T, AccessKindAccessesMap,
6173                                   NO_GLOBAL_EXTERNAL_MEM, &I, &V, Changed);
6174       return true;
6175     }
6176     if (isa<AllocaInst>(V)) {
6177       updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_LOCAL_MEM, &I, &V,
6178                                 Changed);
6179       return true;
6180     }
6181     if (ImmutableCallSite ICS = ImmutableCallSite(&V)) {
6182       const auto &NoAliasAA =
6183           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(ICS));
6184       if (NoAliasAA.isAssumedNoAlias()) {
6185         updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_MALLOCED_MEM, &I,
6186                                   &V, Changed);
6187         return true;
6188       }
6189     }
6190 
6191     updateStateAndAccessesMap(T, AccessKindAccessesMap, NO_UNKOWN_MEM, &I, &V,
6192                               Changed);
6193     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6194                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6195                       << "\n");
6196     return true;
6197   };
6198 
6199   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6200           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6201           /* MaxValues */ 32, StripGEPCB)) {
6202     LLVM_DEBUG(
6203         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6204     updateStateAndAccessesMap(State, AccessKindAccessesMap, NO_UNKOWN_MEM, &I,
6205                               nullptr, Changed);
6206   } else {
6207     LLVM_DEBUG(
6208         dbgs()
6209         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6210         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6211   }
6212 }
6213 
6214 AAMemoryLocation::MemoryLocationsKind
6215 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6216                                                   bool &Changed) {
6217   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6218                     << I << "\n");
6219 
6220   AAMemoryLocation::StateType AccessedLocs;
6221   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6222 
6223   if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
6224 
6225     // First check if we assume any memory is access is visible.
6226     const auto &ICSMemLocationAA =
6227         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(ICS));
6228     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6229                       << " [" << ICSMemLocationAA << "]\n");
6230 
6231     if (ICSMemLocationAA.isAssumedReadNone())
6232       return NO_LOCATIONS;
6233 
6234     if (ICSMemLocationAA.isAssumedInaccessibleMemOnly()) {
6235       updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap,
6236                                 NO_INACCESSIBLE_MEM, &I, nullptr, Changed);
6237       return AccessedLocs.getAssumed();
6238     }
6239 
6240     uint32_t ICSAssumedNotAccessedLocs =
6241         ICSMemLocationAA.getAssumedNotAccessedLocation();
6242 
6243     // Set the argmemonly and global bit as we handle them separately below.
6244     uint32_t ICSAssumedNotAccessedLocsNoArgMem =
6245         ICSAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6246 
6247     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6248       if (ICSAssumedNotAccessedLocsNoArgMem & CurMLK)
6249         continue;
6250       updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, CurMLK, &I,
6251                                 nullptr, Changed);
6252     }
6253 
6254     // Now handle global memory if it might be accessed.
6255     bool HasGlobalAccesses = !(ICSAssumedNotAccessedLocs & NO_GLOBAL_MEM);
6256     if (HasGlobalAccesses) {
6257       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6258                             AccessKind Kind, MemoryLocationsKind MLK) {
6259         updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, MLK, &I,
6260                                   Ptr, Changed);
6261         return true;
6262       };
6263       if (!ICSMemLocationAA.checkForAllAccessesToMemoryKind(
6264               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6265         return AccessedLocs.getWorstState();
6266     }
6267 
6268     LLVM_DEBUG(
6269         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6270                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6271 
6272     // Now handle argument memory if it might be accessed.
6273     bool HasArgAccesses = !(ICSAssumedNotAccessedLocs & NO_ARGUMENT_MEM);
6274     if (HasArgAccesses) {
6275       for (unsigned ArgNo = 0, e = ICS.getNumArgOperands(); ArgNo < e;
6276            ++ArgNo) {
6277 
6278         // Skip non-pointer arguments.
6279         const Value *ArgOp = ICS.getArgOperand(ArgNo);
6280         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6281           continue;
6282 
6283         // Skip readnone arguments.
6284         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(ICS, ArgNo);
6285         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6286             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6287 
6288         if (ArgOpMemLocationAA.isAssumedReadNone())
6289           continue;
6290 
6291         // Categorize potentially accessed pointer arguments as if there was an
6292         // access instruction with them as pointer.
6293         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6294       }
6295     }
6296 
6297     LLVM_DEBUG(
6298         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6299                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6300 
6301     return AccessedLocs.getAssumed();
6302   }
6303 
6304   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6305     LLVM_DEBUG(
6306         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6307                << I << " [" << *Ptr << "]\n");
6308     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6309     return AccessedLocs.getAssumed();
6310   }
6311 
6312   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6313                     << I << "\n");
6314   updateStateAndAccessesMap(AccessedLocs, AccessKindAccessesMap, NO_UNKOWN_MEM,
6315                             &I, nullptr, Changed);
6316   return AccessedLocs.getAssumed();
6317 }
6318 
6319 /// An AA to represent the memory behavior function attributes.
6320 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6321   AAMemoryLocationFunction(const IRPosition &IRP) : AAMemoryLocationImpl(IRP) {}
6322 
6323   /// See AbstractAttribute::updateImpl(Attributor &A).
6324   virtual ChangeStatus updateImpl(Attributor &A) override {
6325 
6326     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6327         *this, getIRPosition(), /* TrackDependence */ false);
6328     if (MemBehaviorAA.isAssumedReadNone()) {
6329       if (MemBehaviorAA.isKnownReadNone())
6330         return indicateOptimisticFixpoint();
6331       assert(isAssumedReadNone() &&
6332              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6333       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6334       return ChangeStatus::UNCHANGED;
6335     }
6336 
6337     // The current assumed state used to determine a change.
6338     auto AssumedState = getAssumed();
6339     bool Changed = false;
6340 
6341     auto CheckRWInst = [&](Instruction &I) {
6342       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6343       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6344                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6345       removeAssumedBits(inverseLocation(MLK, false, false));
6346       return true;
6347     };
6348 
6349     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6350       return indicatePessimisticFixpoint();
6351 
6352     Changed |= AssumedState != getAssumed();
6353     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6354   }
6355 
6356   /// See AbstractAttribute::trackStatistics()
6357   void trackStatistics() const override {
6358     if (isAssumedReadNone())
6359       STATS_DECLTRACK_FN_ATTR(readnone)
6360     else if (isAssumedArgMemOnly())
6361       STATS_DECLTRACK_FN_ATTR(argmemonly)
6362     else if (isAssumedInaccessibleMemOnly())
6363       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6364     else if (isAssumedInaccessibleOrArgMemOnly())
6365       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6366   }
6367 };
6368 
6369 /// AAMemoryLocation attribute for call sites.
6370 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6371   AAMemoryLocationCallSite(const IRPosition &IRP) : AAMemoryLocationImpl(IRP) {}
6372 
6373   /// See AbstractAttribute::initialize(...).
6374   void initialize(Attributor &A) override {
6375     AAMemoryLocationImpl::initialize(A);
6376     Function *F = getAssociatedFunction();
6377     if (!F || !A.isFunctionIPOAmendable(*F))
6378       indicatePessimisticFixpoint();
6379   }
6380 
6381   /// See AbstractAttribute::updateImpl(...).
6382   ChangeStatus updateImpl(Attributor &A) override {
6383     // TODO: Once we have call site specific value information we can provide
6384     //       call site specific liveness liveness information and then it makes
6385     //       sense to specialize attributes for call sites arguments instead of
6386     //       redirecting requests to the callee argument.
6387     Function *F = getAssociatedFunction();
6388     const IRPosition &FnPos = IRPosition::function(*F);
6389     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6390     bool Changed = false;
6391     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6392                           AccessKind Kind, MemoryLocationsKind MLK) {
6393       updateStateAndAccessesMap(getState(), AccessKindAccessesMap, MLK, I, Ptr,
6394                                 Changed);
6395       return true;
6396     };
6397     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6398       return indicatePessimisticFixpoint();
6399     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6400   }
6401 
6402   /// See AbstractAttribute::trackStatistics()
6403   void trackStatistics() const override {
6404     if (isAssumedReadNone())
6405       STATS_DECLTRACK_CS_ATTR(readnone)
6406   }
6407 };
6408 
6409 /// ------------------ Value Constant Range Attribute -------------------------
6410 
6411 struct AAValueConstantRangeImpl : AAValueConstantRange {
6412   using StateType = IntegerRangeState;
6413   AAValueConstantRangeImpl(const IRPosition &IRP) : AAValueConstantRange(IRP) {}
6414 
6415   /// See AbstractAttribute::getAsStr().
6416   const std::string getAsStr() const override {
6417     std::string Str;
6418     llvm::raw_string_ostream OS(Str);
6419     OS << "range(" << getBitWidth() << ")<";
6420     getKnown().print(OS);
6421     OS << " / ";
6422     getAssumed().print(OS);
6423     OS << ">";
6424     return OS.str();
6425   }
6426 
6427   /// Helper function to get a SCEV expr for the associated value at program
6428   /// point \p I.
6429   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6430     if (!getAnchorScope())
6431       return nullptr;
6432 
6433     ScalarEvolution *SE =
6434         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6435             *getAnchorScope());
6436 
6437     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6438         *getAnchorScope());
6439 
6440     if (!SE || !LI)
6441       return nullptr;
6442 
6443     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6444     if (!I)
6445       return S;
6446 
6447     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6448   }
6449 
6450   /// Helper function to get a range from SCEV for the associated value at
6451   /// program point \p I.
6452   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6453                                          const Instruction *I = nullptr) const {
6454     if (!getAnchorScope())
6455       return getWorstState(getBitWidth());
6456 
6457     ScalarEvolution *SE =
6458         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6459             *getAnchorScope());
6460 
6461     const SCEV *S = getSCEV(A, I);
6462     if (!SE || !S)
6463       return getWorstState(getBitWidth());
6464 
6465     return SE->getUnsignedRange(S);
6466   }
6467 
6468   /// Helper function to get a range from LVI for the associated value at
6469   /// program point \p I.
6470   ConstantRange
6471   getConstantRangeFromLVI(Attributor &A,
6472                           const Instruction *CtxI = nullptr) const {
6473     if (!getAnchorScope())
6474       return getWorstState(getBitWidth());
6475 
6476     LazyValueInfo *LVI =
6477         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6478             *getAnchorScope());
6479 
6480     if (!LVI || !CtxI)
6481       return getWorstState(getBitWidth());
6482     return LVI->getConstantRange(&getAssociatedValue(),
6483                                  const_cast<BasicBlock *>(CtxI->getParent()),
6484                                  const_cast<Instruction *>(CtxI));
6485   }
6486 
6487   /// See AAValueConstantRange::getKnownConstantRange(..).
6488   ConstantRange
6489   getKnownConstantRange(Attributor &A,
6490                         const Instruction *CtxI = nullptr) const override {
6491     if (!CtxI || CtxI == getCtxI())
6492       return getKnown();
6493 
6494     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6495     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6496     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6497   }
6498 
6499   /// See AAValueConstantRange::getAssumedConstantRange(..).
6500   ConstantRange
6501   getAssumedConstantRange(Attributor &A,
6502                           const Instruction *CtxI = nullptr) const override {
6503     // TODO: Make SCEV use Attributor assumption.
6504     //       We may be able to bound a variable range via assumptions in
6505     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6506     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6507 
6508     if (!CtxI || CtxI == getCtxI())
6509       return getAssumed();
6510 
6511     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6512     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6513     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6514   }
6515 
6516   /// See AbstractAttribute::initialize(..).
6517   void initialize(Attributor &A) override {
6518     // Intersect a range given by SCEV.
6519     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6520 
6521     // Intersect a range given by LVI.
6522     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6523   }
6524 
6525   /// Helper function to create MDNode for range metadata.
6526   static MDNode *
6527   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6528                             const ConstantRange &AssumedConstantRange) {
6529     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6530                                   Ty, AssumedConstantRange.getLower())),
6531                               ConstantAsMetadata::get(ConstantInt::get(
6532                                   Ty, AssumedConstantRange.getUpper()))};
6533     return MDNode::get(Ctx, LowAndHigh);
6534   }
6535 
6536   /// Return true if \p Assumed is included in \p KnownRanges.
6537   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6538 
6539     if (Assumed.isFullSet())
6540       return false;
6541 
6542     if (!KnownRanges)
6543       return true;
6544 
6545     // If multiple ranges are annotated in IR, we give up to annotate assumed
6546     // range for now.
6547 
6548     // TODO:  If there exists a known range which containts assumed range, we
6549     // can say assumed range is better.
6550     if (KnownRanges->getNumOperands() > 2)
6551       return false;
6552 
6553     ConstantInt *Lower =
6554         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6555     ConstantInt *Upper =
6556         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6557 
6558     ConstantRange Known(Lower->getValue(), Upper->getValue());
6559     return Known.contains(Assumed) && Known != Assumed;
6560   }
6561 
6562   /// Helper function to set range metadata.
6563   static bool
6564   setRangeMetadataIfisBetterRange(Instruction *I,
6565                                   const ConstantRange &AssumedConstantRange) {
6566     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6567     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6568       if (!AssumedConstantRange.isEmptySet()) {
6569         I->setMetadata(LLVMContext::MD_range,
6570                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6571                                                  AssumedConstantRange));
6572         return true;
6573       }
6574     }
6575     return false;
6576   }
6577 
6578   /// See AbstractAttribute::manifest()
6579   ChangeStatus manifest(Attributor &A) override {
6580     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6581     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6582     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6583 
6584     auto &V = getAssociatedValue();
6585     if (!AssumedConstantRange.isEmptySet() &&
6586         !AssumedConstantRange.isSingleElement()) {
6587       if (Instruction *I = dyn_cast<Instruction>(&V))
6588         if (isa<CallInst>(I) || isa<LoadInst>(I))
6589           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6590             Changed = ChangeStatus::CHANGED;
6591     }
6592 
6593     return Changed;
6594   }
6595 };
6596 
6597 struct AAValueConstantRangeArgument final
6598     : AAArgumentFromCallSiteArguments<
6599           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6600   using Base = AAArgumentFromCallSiteArguments<
6601       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
6602   AAValueConstantRangeArgument(const IRPosition &IRP) : Base(IRP) {}
6603 
6604   /// See AbstractAttribute::initialize(..).
6605   void initialize(Attributor &A) override {
6606     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
6607       indicatePessimisticFixpoint();
6608     } else {
6609       Base::initialize(A);
6610     }
6611   }
6612 
6613   /// See AbstractAttribute::trackStatistics()
6614   void trackStatistics() const override {
6615     STATS_DECLTRACK_ARG_ATTR(value_range)
6616   }
6617 };
6618 
6619 struct AAValueConstantRangeReturned
6620     : AAReturnedFromReturnedValues<AAValueConstantRange,
6621                                    AAValueConstantRangeImpl> {
6622   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6623                                             AAValueConstantRangeImpl>;
6624   AAValueConstantRangeReturned(const IRPosition &IRP) : Base(IRP) {}
6625 
6626   /// See AbstractAttribute::initialize(...).
6627   void initialize(Attributor &A) override {}
6628 
6629   /// See AbstractAttribute::trackStatistics()
6630   void trackStatistics() const override {
6631     STATS_DECLTRACK_FNRET_ATTR(value_range)
6632   }
6633 };
6634 
6635 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6636   AAValueConstantRangeFloating(const IRPosition &IRP)
6637       : AAValueConstantRangeImpl(IRP) {}
6638 
6639   /// See AbstractAttribute::initialize(...).
6640   void initialize(Attributor &A) override {
6641     AAValueConstantRangeImpl::initialize(A);
6642     Value &V = getAssociatedValue();
6643 
6644     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6645       unionAssumed(ConstantRange(C->getValue()));
6646       indicateOptimisticFixpoint();
6647       return;
6648     }
6649 
6650     if (isa<UndefValue>(&V)) {
6651       // Collapse the undef state to 0.
6652       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6653       indicateOptimisticFixpoint();
6654       return;
6655     }
6656 
6657     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6658       return;
6659     // If it is a load instruction with range metadata, use it.
6660     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6661       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6662         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6663         return;
6664       }
6665 
6666     // We can work with PHI and select instruction as we traverse their operands
6667     // during update.
6668     if (isa<SelectInst>(V) || isa<PHINode>(V))
6669       return;
6670 
6671     // Otherwise we give up.
6672     indicatePessimisticFixpoint();
6673 
6674     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6675                       << getAssociatedValue() << "\n");
6676   }
6677 
6678   bool calculateBinaryOperator(
6679       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6680       const Instruction *CtxI,
6681       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6682     Value *LHS = BinOp->getOperand(0);
6683     Value *RHS = BinOp->getOperand(1);
6684     // TODO: Allow non integers as well.
6685     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6686       return false;
6687 
6688     auto &LHSAA =
6689         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6690     QuerriedAAs.push_back(&LHSAA);
6691     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6692 
6693     auto &RHSAA =
6694         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6695     QuerriedAAs.push_back(&RHSAA);
6696     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6697 
6698     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
6699 
6700     T.unionAssumed(AssumedRange);
6701 
6702     // TODO: Track a known state too.
6703 
6704     return T.isValidState();
6705   }
6706 
6707   bool calculateCastInst(
6708       Attributor &A, CastInst *CastI, IntegerRangeState &T,
6709       const Instruction *CtxI,
6710       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6711     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
6712     // TODO: Allow non integers as well.
6713     Value &OpV = *CastI->getOperand(0);
6714     if (!OpV.getType()->isIntegerTy())
6715       return false;
6716 
6717     auto &OpAA =
6718         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
6719     QuerriedAAs.push_back(&OpAA);
6720     T.unionAssumed(
6721         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
6722     return T.isValidState();
6723   }
6724 
6725   bool
6726   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
6727                    const Instruction *CtxI,
6728                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6729     Value *LHS = CmpI->getOperand(0);
6730     Value *RHS = CmpI->getOperand(1);
6731     // TODO: Allow non integers as well.
6732     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6733       return false;
6734 
6735     auto &LHSAA =
6736         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6737     QuerriedAAs.push_back(&LHSAA);
6738     auto &RHSAA =
6739         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6740     QuerriedAAs.push_back(&RHSAA);
6741 
6742     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6743     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6744 
6745     // If one of them is empty set, we can't decide.
6746     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
6747       return true;
6748 
6749     bool MustTrue = false, MustFalse = false;
6750 
6751     auto AllowedRegion =
6752         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
6753 
6754     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
6755         CmpI->getPredicate(), RHSAARange);
6756 
6757     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
6758       MustFalse = true;
6759 
6760     if (SatisfyingRegion.contains(LHSAARange))
6761       MustTrue = true;
6762 
6763     assert((!MustTrue || !MustFalse) &&
6764            "Either MustTrue or MustFalse should be false!");
6765 
6766     if (MustTrue)
6767       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
6768     else if (MustFalse)
6769       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
6770     else
6771       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
6772 
6773     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
6774                       << " " << RHSAA << "\n");
6775 
6776     // TODO: Track a known state too.
6777     return T.isValidState();
6778   }
6779 
6780   /// See AbstractAttribute::updateImpl(...).
6781   ChangeStatus updateImpl(Attributor &A) override {
6782     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
6783                             IntegerRangeState &T, bool Stripped) -> bool {
6784       Instruction *I = dyn_cast<Instruction>(&V);
6785       if (!I || isa<CallBase>(I)) {
6786 
6787         // If the value is not instruction, we query AA to Attributor.
6788         const auto &AA =
6789             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
6790 
6791         // Clamp operator is not used to utilize a program point CtxI.
6792         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
6793 
6794         return T.isValidState();
6795       }
6796 
6797       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
6798       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
6799         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
6800           return false;
6801       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
6802         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
6803           return false;
6804       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
6805         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
6806           return false;
6807       } else {
6808         // Give up with other instructions.
6809         // TODO: Add other instructions
6810 
6811         T.indicatePessimisticFixpoint();
6812         return false;
6813       }
6814 
6815       // Catch circular reasoning in a pessimistic way for now.
6816       // TODO: Check how the range evolves and if we stripped anything, see also
6817       //       AADereferenceable or AAAlign for similar situations.
6818       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
6819         if (QueriedAA != this)
6820           continue;
6821         // If we are in a stady state we do not need to worry.
6822         if (T.getAssumed() == getState().getAssumed())
6823           continue;
6824         T.indicatePessimisticFixpoint();
6825       }
6826 
6827       return T.isValidState();
6828     };
6829 
6830     IntegerRangeState T(getBitWidth());
6831 
6832     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
6833             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
6834       return indicatePessimisticFixpoint();
6835 
6836     return clampStateAndIndicateChange(getState(), T);
6837   }
6838 
6839   /// See AbstractAttribute::trackStatistics()
6840   void trackStatistics() const override {
6841     STATS_DECLTRACK_FLOATING_ATTR(value_range)
6842   }
6843 };
6844 
6845 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
6846   AAValueConstantRangeFunction(const IRPosition &IRP)
6847       : AAValueConstantRangeImpl(IRP) {}
6848 
6849   /// See AbstractAttribute::initialize(...).
6850   ChangeStatus updateImpl(Attributor &A) override {
6851     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
6852                      "not be called");
6853   }
6854 
6855   /// See AbstractAttribute::trackStatistics()
6856   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
6857 };
6858 
6859 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
6860   AAValueConstantRangeCallSite(const IRPosition &IRP)
6861       : AAValueConstantRangeFunction(IRP) {}
6862 
6863   /// See AbstractAttribute::trackStatistics()
6864   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
6865 };
6866 
6867 struct AAValueConstantRangeCallSiteReturned
6868     : AACallSiteReturnedFromReturned<AAValueConstantRange,
6869                                      AAValueConstantRangeImpl> {
6870   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP)
6871       : AACallSiteReturnedFromReturned<AAValueConstantRange,
6872                                        AAValueConstantRangeImpl>(IRP) {}
6873 
6874   /// See AbstractAttribute::initialize(...).
6875   void initialize(Attributor &A) override {
6876     // If it is a load instruction with range metadata, use the metadata.
6877     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
6878       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
6879         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6880 
6881     AAValueConstantRangeImpl::initialize(A);
6882   }
6883 
6884   /// See AbstractAttribute::trackStatistics()
6885   void trackStatistics() const override {
6886     STATS_DECLTRACK_CSRET_ATTR(value_range)
6887   }
6888 };
6889 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
6890   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP)
6891       : AAValueConstantRangeFloating(IRP) {}
6892 
6893   /// See AbstractAttribute::trackStatistics()
6894   void trackStatistics() const override {
6895     STATS_DECLTRACK_CSARG_ATTR(value_range)
6896   }
6897 };
6898 
6899 const char AAReturnedValues::ID = 0;
6900 const char AANoUnwind::ID = 0;
6901 const char AANoSync::ID = 0;
6902 const char AANoFree::ID = 0;
6903 const char AANonNull::ID = 0;
6904 const char AANoRecurse::ID = 0;
6905 const char AAWillReturn::ID = 0;
6906 const char AAUndefinedBehavior::ID = 0;
6907 const char AANoAlias::ID = 0;
6908 const char AAReachability::ID = 0;
6909 const char AANoReturn::ID = 0;
6910 const char AAIsDead::ID = 0;
6911 const char AADereferenceable::ID = 0;
6912 const char AAAlign::ID = 0;
6913 const char AANoCapture::ID = 0;
6914 const char AAValueSimplify::ID = 0;
6915 const char AAHeapToStack::ID = 0;
6916 const char AAPrivatizablePtr::ID = 0;
6917 const char AAMemoryBehavior::ID = 0;
6918 const char AAMemoryLocation::ID = 0;
6919 const char AAValueConstantRange::ID = 0;
6920 
6921 // Macro magic to create the static generator function for attributes that
6922 // follow the naming scheme.
6923 
6924 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
6925   case IRPosition::PK:                                                         \
6926     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
6927 
6928 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
6929   case IRPosition::PK:                                                         \
6930     AA = new (A.Allocator) CLASS##SUFFIX(IRP);                                 \
6931     break;
6932 
6933 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
6934   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6935     CLASS *AA = nullptr;                                                       \
6936     switch (IRP.getPositionKind()) {                                           \
6937       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6938       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
6939       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
6940       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
6941       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
6942       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
6943       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6944       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
6945     }                                                                          \
6946     return *AA;                                                                \
6947   }
6948 
6949 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
6950   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6951     CLASS *AA = nullptr;                                                       \
6952     switch (IRP.getPositionKind()) {                                           \
6953       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6954       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
6955       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
6956       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
6957       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
6958       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
6959       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
6960       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
6961     }                                                                          \
6962     return *AA;                                                                \
6963   }
6964 
6965 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
6966   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6967     CLASS *AA = nullptr;                                                       \
6968     switch (IRP.getPositionKind()) {                                           \
6969       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6970       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6971       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
6972       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
6973       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
6974       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
6975       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
6976       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
6977     }                                                                          \
6978     return *AA;                                                                \
6979   }
6980 
6981 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
6982   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6983     CLASS *AA = nullptr;                                                       \
6984     switch (IRP.getPositionKind()) {                                           \
6985       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6986       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
6987       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
6988       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
6989       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
6990       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
6991       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
6992       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6993     }                                                                          \
6994     return *AA;                                                                \
6995   }
6996 
6997 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
6998   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6999     CLASS *AA = nullptr;                                                       \
7000     switch (IRP.getPositionKind()) {                                           \
7001       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7002       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7003       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7004       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7005       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7006       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7007       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7008       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7009     }                                                                          \
7010     return *AA;                                                                \
7011   }
7012 
7013 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
7014 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
7015 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
7016 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
7017 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
7018 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
7019 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
7020 
7021 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
7022 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7023 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
7024 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7025 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7026 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7027 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7028 
7029 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7030 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7031 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7032 
7033 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7034 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7035 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7036 
7037 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7038 
7039 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7040 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7041 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7042 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7043 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7044 #undef SWITCH_PK_CREATE
7045 #undef SWITCH_PK_INV
7046