1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/AssumeBundleQueries.h"
19 #include "llvm/Analysis/CaptureTracking.h"
20 #include "llvm/Analysis/LazyValueInfo.h"
21 #include "llvm/Analysis/MemoryBuiltins.h"
22 #include "llvm/Analysis/ScalarEvolution.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/IR/IRBuilder.h"
25 #include "llvm/IR/IntrinsicInst.h"
26 #include "llvm/IR/NoFolder.h"
27 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
28 #include "llvm/Transforms/Utils/Local.h"
29 
30 #include <cassert>
31 
32 using namespace llvm;
33 
34 #define DEBUG_TYPE "attributor"
35 
36 static cl::opt<bool> ManifestInternal(
37     "attributor-manifest-internal", cl::Hidden,
38     cl::desc("Manifest Attributor internal string attributes."),
39     cl::init(false));
40 
41 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
42                                        cl::Hidden);
43 
44 STATISTIC(NumAAs, "Number of abstract attributes created");
45 
46 // Some helper macros to deal with statistics tracking.
47 //
48 // Usage:
49 // For simple IR attribute tracking overload trackStatistics in the abstract
50 // attribute and choose the right STATS_DECLTRACK_********* macro,
51 // e.g.,:
52 //  void trackStatistics() const override {
53 //    STATS_DECLTRACK_ARG_ATTR(returned)
54 //  }
55 // If there is a single "increment" side one can use the macro
56 // STATS_DECLTRACK with a custom message. If there are multiple increment
57 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
58 //
59 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
60   ("Number of " #TYPE " marked '" #NAME "'")
61 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
62 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
63 #define STATS_DECL(NAME, TYPE, MSG)                                            \
64   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
65 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
66 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
67   {                                                                            \
68     STATS_DECL(NAME, TYPE, MSG)                                                \
69     STATS_TRACK(NAME, TYPE)                                                    \
70   }
71 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
72   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
73 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
74   STATS_DECLTRACK(NAME, CSArguments,                                           \
75                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
76 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
77   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
78 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
79   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
80 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
81   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
82                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
83 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
84   STATS_DECLTRACK(NAME, CSReturn,                                              \
85                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
86 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
87   STATS_DECLTRACK(NAME, Floating,                                              \
88                   ("Number of floating values known to be '" #NAME "'"))
89 
90 // Specialization of the operator<< for abstract attributes subclasses. This
91 // disambiguates situations where multiple operators are applicable.
92 namespace llvm {
93 #define PIPE_OPERATOR(CLASS)                                                   \
94   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
95     return OS << static_cast<const AbstractAttribute &>(AA);                   \
96   }
97 
98 PIPE_OPERATOR(AAIsDead)
99 PIPE_OPERATOR(AANoUnwind)
100 PIPE_OPERATOR(AANoSync)
101 PIPE_OPERATOR(AANoRecurse)
102 PIPE_OPERATOR(AAWillReturn)
103 PIPE_OPERATOR(AANoReturn)
104 PIPE_OPERATOR(AAReturnedValues)
105 PIPE_OPERATOR(AANonNull)
106 PIPE_OPERATOR(AANoAlias)
107 PIPE_OPERATOR(AADereferenceable)
108 PIPE_OPERATOR(AAAlign)
109 PIPE_OPERATOR(AANoCapture)
110 PIPE_OPERATOR(AAValueSimplify)
111 PIPE_OPERATOR(AANoFree)
112 PIPE_OPERATOR(AAHeapToStack)
113 PIPE_OPERATOR(AAReachability)
114 PIPE_OPERATOR(AAMemoryBehavior)
115 PIPE_OPERATOR(AAMemoryLocation)
116 PIPE_OPERATOR(AAValueConstantRange)
117 PIPE_OPERATOR(AAPrivatizablePtr)
118 
119 #undef PIPE_OPERATOR
120 } // namespace llvm
121 
122 namespace {
123 
124 static Optional<ConstantInt *>
125 getAssumedConstantInt(Attributor &A, const Value &V,
126                       const AbstractAttribute &AA,
127                       bool &UsedAssumedInformation) {
128   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
129   if (C.hasValue())
130     return dyn_cast_or_null<ConstantInt>(C.getValue());
131   return llvm::None;
132 }
133 
134 /// Get pointer operand of memory accessing instruction. If \p I is
135 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
136 /// is set to false and the instruction is volatile, return nullptr.
137 static const Value *getPointerOperand(const Instruction *I,
138                                       bool AllowVolatile) {
139   if (auto *LI = dyn_cast<LoadInst>(I)) {
140     if (!AllowVolatile && LI->isVolatile())
141       return nullptr;
142     return LI->getPointerOperand();
143   }
144 
145   if (auto *SI = dyn_cast<StoreInst>(I)) {
146     if (!AllowVolatile && SI->isVolatile())
147       return nullptr;
148     return SI->getPointerOperand();
149   }
150 
151   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
152     if (!AllowVolatile && CXI->isVolatile())
153       return nullptr;
154     return CXI->getPointerOperand();
155   }
156 
157   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
158     if (!AllowVolatile && RMWI->isVolatile())
159       return nullptr;
160     return RMWI->getPointerOperand();
161   }
162 
163   return nullptr;
164 }
165 
166 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
167 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
168 /// getelement pointer instructions that traverse the natural type of \p Ptr if
169 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
170 /// through a cast to i8*.
171 ///
172 /// TODO: This could probably live somewhere more prominantly if it doesn't
173 ///       already exist.
174 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
175                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
176   assert(Offset >= 0 && "Negative offset not supported yet!");
177   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
178                     << "-bytes as " << *ResTy << "\n");
179 
180   // The initial type we are trying to traverse to get nice GEPs.
181   Type *Ty = Ptr->getType();
182 
183   SmallVector<Value *, 4> Indices;
184   std::string GEPName = Ptr->getName().str();
185   while (Offset) {
186     uint64_t Idx, Rem;
187 
188     if (auto *STy = dyn_cast<StructType>(Ty)) {
189       const StructLayout *SL = DL.getStructLayout(STy);
190       if (int64_t(SL->getSizeInBytes()) < Offset)
191         break;
192       Idx = SL->getElementContainingOffset(Offset);
193       assert(Idx < STy->getNumElements() && "Offset calculation error!");
194       Rem = Offset - SL->getElementOffset(Idx);
195       Ty = STy->getElementType(Idx);
196     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
197       Ty = PTy->getElementType();
198       if (!Ty->isSized())
199         break;
200       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
201       assert(ElementSize && "Expected type with size!");
202       Idx = Offset / ElementSize;
203       Rem = Offset % ElementSize;
204     } else {
205       // Non-aggregate type, we cast and make byte-wise progress now.
206       break;
207     }
208 
209     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
210                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
211 
212     GEPName += "." + std::to_string(Idx);
213     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
214     Offset = Rem;
215   }
216 
217   // Create a GEP if we collected indices above.
218   if (Indices.size())
219     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
220 
221   // If an offset is left we use byte-wise adjustment.
222   if (Offset) {
223     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
224     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
225                         GEPName + ".b" + Twine(Offset));
226   }
227 
228   // Ensure the result has the requested type.
229   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
230 
231   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
232   return Ptr;
233 }
234 
235 /// Recursively visit all values that might become \p IRP at some point. This
236 /// will be done by looking through cast instructions, selects, phis, and calls
237 /// with the "returned" attribute. Once we cannot look through the value any
238 /// further, the callback \p VisitValueCB is invoked and passed the current
239 /// value, the \p State, and a flag to indicate if we stripped anything.
240 /// Stripped means that we unpacked the value associated with \p IRP at least
241 /// once. Note that the value used for the callback may still be the value
242 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
243 /// we will never visit more values than specified by \p MaxValues.
244 template <typename AAType, typename StateTy>
245 static bool genericValueTraversal(
246     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
247     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
248         VisitValueCB,
249     const Instruction *CtxI, int MaxValues = 16,
250     function_ref<Value *(Value *)> StripCB = nullptr) {
251 
252   const AAIsDead *LivenessAA = nullptr;
253   if (IRP.getAnchorScope())
254     LivenessAA = &A.getAAFor<AAIsDead>(
255         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
256         /* TrackDependence */ false);
257   bool AnyDead = false;
258 
259   using Item = std::pair<Value *, const Instruction *>;
260   SmallSet<Item, 16> Visited;
261   SmallVector<Item, 16> Worklist;
262   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
263 
264   int Iteration = 0;
265   do {
266     Item I = Worklist.pop_back_val();
267     Value *V = I.first;
268     CtxI = I.second;
269     if (StripCB)
270       V = StripCB(V);
271 
272     // Check if we should process the current value. To prevent endless
273     // recursion keep a record of the values we followed!
274     if (!Visited.insert(I).second)
275       continue;
276 
277     // Make sure we limit the compile time for complex expressions.
278     if (Iteration++ >= MaxValues)
279       return false;
280 
281     // Explicitly look through calls with a "returned" attribute if we do
282     // not have a pointer as stripPointerCasts only works on them.
283     Value *NewV = nullptr;
284     if (V->getType()->isPointerTy()) {
285       NewV = V->stripPointerCasts();
286     } else {
287       auto *CB = dyn_cast<CallBase>(V);
288       if (CB && CB->getCalledFunction()) {
289         for (Argument &Arg : CB->getCalledFunction()->args())
290           if (Arg.hasReturnedAttr()) {
291             NewV = CB->getArgOperand(Arg.getArgNo());
292             break;
293           }
294       }
295     }
296     if (NewV && NewV != V) {
297       Worklist.push_back({NewV, CtxI});
298       continue;
299     }
300 
301     // Look through select instructions, visit both potential values.
302     if (auto *SI = dyn_cast<SelectInst>(V)) {
303       Worklist.push_back({SI->getTrueValue(), CtxI});
304       Worklist.push_back({SI->getFalseValue(), CtxI});
305       continue;
306     }
307 
308     // Look through phi nodes, visit all live operands.
309     if (auto *PHI = dyn_cast<PHINode>(V)) {
310       assert(LivenessAA &&
311              "Expected liveness in the presence of instructions!");
312       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
313         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
314         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
315                             LivenessAA,
316                             /* CheckBBLivenessOnly */ true)) {
317           AnyDead = true;
318           continue;
319         }
320         Worklist.push_back(
321             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
322       }
323       continue;
324     }
325 
326     // Once a leaf is reached we inform the user through the callback.
327     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
328       return false;
329   } while (!Worklist.empty());
330 
331   // If we actually used liveness information so we have to record a dependence.
332   if (AnyDead)
333     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
334 
335   // All values have been visited.
336   return true;
337 }
338 
339 static const Value *
340 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
341                                      const DataLayout &DL,
342                                      bool AllowNonInbounds = false) {
343   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
344   if (!Ptr)
345     return nullptr;
346 
347   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
348                                           AllowNonInbounds);
349 }
350 
351 /// Helper function to clamp a state \p S of type \p StateType with the
352 /// information in \p R and indicate/return if \p S did change (as-in update is
353 /// required to be run again).
354 template <typename StateType>
355 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
356   auto Assumed = S.getAssumed();
357   S ^= R;
358   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
359                                    : ChangeStatus::CHANGED;
360 }
361 
362 /// Clamp the information known for all returned values of a function
363 /// (identified by \p QueryingAA) into \p S.
364 template <typename AAType, typename StateType = typename AAType::StateType>
365 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
366                                      StateType &S) {
367   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
368                     << QueryingAA << " into " << S << "\n");
369 
370   assert((QueryingAA.getIRPosition().getPositionKind() ==
371               IRPosition::IRP_RETURNED ||
372           QueryingAA.getIRPosition().getPositionKind() ==
373               IRPosition::IRP_CALL_SITE_RETURNED) &&
374          "Can only clamp returned value states for a function returned or call "
375          "site returned position!");
376 
377   // Use an optional state as there might not be any return values and we want
378   // to join (IntegerState::operator&) the state of all there are.
379   Optional<StateType> T;
380 
381   // Callback for each possibly returned value.
382   auto CheckReturnValue = [&](Value &RV) -> bool {
383     const IRPosition &RVPos = IRPosition::value(RV);
384     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
385     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
386                       << " @ " << RVPos << "\n");
387     const StateType &AAS = static_cast<const StateType &>(AA.getState());
388     if (T.hasValue())
389       *T &= AAS;
390     else
391       T = AAS;
392     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
393                       << "\n");
394     return T->isValidState();
395   };
396 
397   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
398     S.indicatePessimisticFixpoint();
399   else if (T.hasValue())
400     S ^= *T;
401 }
402 
403 /// Helper class to compose two generic deduction
404 template <typename AAType, typename Base, typename StateType,
405           template <typename...> class F, template <typename...> class G>
406 struct AAComposeTwoGenericDeduction
407     : public F<AAType, G<AAType, Base, StateType>, StateType> {
408   AAComposeTwoGenericDeduction(const IRPosition &IRP, Attributor &A)
409       : F<AAType, G<AAType, Base, StateType>, StateType>(IRP, A) {}
410 
411   void initialize(Attributor &A) override {
412     F<AAType, G<AAType, Base, StateType>, StateType>::initialize(A);
413     G<AAType, Base, StateType>::initialize(A);
414   }
415 
416   /// See AbstractAttribute::updateImpl(...).
417   ChangeStatus updateImpl(Attributor &A) override {
418     ChangeStatus ChangedF =
419         F<AAType, G<AAType, Base, StateType>, StateType>::updateImpl(A);
420     ChangeStatus ChangedG = G<AAType, Base, StateType>::updateImpl(A);
421     return ChangedF | ChangedG;
422   }
423 };
424 
425 /// Helper class for generic deduction: return value -> returned position.
426 template <typename AAType, typename Base,
427           typename StateType = typename Base::StateType>
428 struct AAReturnedFromReturnedValues : public Base {
429   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
430       : Base(IRP, A) {}
431 
432   /// See AbstractAttribute::updateImpl(...).
433   ChangeStatus updateImpl(Attributor &A) override {
434     StateType S(StateType::getBestState(this->getState()));
435     clampReturnedValueStates<AAType, StateType>(A, *this, S);
436     // TODO: If we know we visited all returned values, thus no are assumed
437     // dead, we can take the known information from the state T.
438     return clampStateAndIndicateChange<StateType>(this->getState(), S);
439   }
440 };
441 
442 /// Clamp the information known at all call sites for a given argument
443 /// (identified by \p QueryingAA) into \p S.
444 template <typename AAType, typename StateType = typename AAType::StateType>
445 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
446                                         StateType &S) {
447   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
448                     << QueryingAA << " into " << S << "\n");
449 
450   assert(QueryingAA.getIRPosition().getPositionKind() ==
451              IRPosition::IRP_ARGUMENT &&
452          "Can only clamp call site argument states for an argument position!");
453 
454   // Use an optional state as there might not be any return values and we want
455   // to join (IntegerState::operator&) the state of all there are.
456   Optional<StateType> T;
457 
458   // The argument number which is also the call site argument number.
459   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
460 
461   auto CallSiteCheck = [&](AbstractCallSite ACS) {
462     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
463     // Check if a coresponding argument was found or if it is on not associated
464     // (which can happen for callback calls).
465     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
466       return false;
467 
468     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
469     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
470                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
471     const StateType &AAS = static_cast<const StateType &>(AA.getState());
472     if (T.hasValue())
473       *T &= AAS;
474     else
475       T = AAS;
476     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
477                       << "\n");
478     return T->isValidState();
479   };
480 
481   bool AllCallSitesKnown;
482   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
483                               AllCallSitesKnown))
484     S.indicatePessimisticFixpoint();
485   else if (T.hasValue())
486     S ^= *T;
487 }
488 
489 /// Helper class for generic deduction: call site argument -> argument position.
490 template <typename AAType, typename Base,
491           typename StateType = typename AAType::StateType>
492 struct AAArgumentFromCallSiteArguments : public Base {
493   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
494       : Base(IRP, A) {}
495 
496   /// See AbstractAttribute::updateImpl(...).
497   ChangeStatus updateImpl(Attributor &A) override {
498     StateType S(StateType::getBestState(this->getState()));
499     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
500     // TODO: If we know we visited all incoming values, thus no are assumed
501     // dead, we can take the known information from the state T.
502     return clampStateAndIndicateChange<StateType>(this->getState(), S);
503   }
504 };
505 
506 /// Helper class for generic replication: function returned -> cs returned.
507 template <typename AAType, typename Base,
508           typename StateType = typename Base::StateType>
509 struct AACallSiteReturnedFromReturned : public Base {
510   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
511       : Base(IRP, A) {}
512 
513   /// See AbstractAttribute::updateImpl(...).
514   ChangeStatus updateImpl(Attributor &A) override {
515     assert(this->getIRPosition().getPositionKind() ==
516                IRPosition::IRP_CALL_SITE_RETURNED &&
517            "Can only wrap function returned positions for call site returned "
518            "positions!");
519     auto &S = this->getState();
520 
521     const Function *AssociatedFunction =
522         this->getIRPosition().getAssociatedFunction();
523     if (!AssociatedFunction)
524       return S.indicatePessimisticFixpoint();
525 
526     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
527     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
528     return clampStateAndIndicateChange(
529         S, static_cast<const StateType &>(AA.getState()));
530   }
531 };
532 
533 /// Helper class for generic deduction using must-be-executed-context
534 /// Base class is required to have `followUse` method.
535 
536 /// bool followUse(Attributor &A, const Use *U, const Instruction *I)
537 /// U - Underlying use.
538 /// I - The user of the \p U.
539 /// `followUse` returns true if the value should be tracked transitively.
540 
541 template <typename AAType, typename Base,
542           typename StateType = typename AAType::StateType>
543 struct AAFromMustBeExecutedContext : public Base {
544   AAFromMustBeExecutedContext(const IRPosition &IRP, Attributor &A)
545       : Base(IRP, A) {}
546 
547   void initialize(Attributor &A) override {
548     Base::initialize(A);
549     const IRPosition &IRP = this->getIRPosition();
550     Instruction *CtxI = IRP.getCtxI();
551 
552     if (!CtxI)
553       return;
554 
555     for (const Use &U : IRP.getAssociatedValue().uses())
556       Uses.insert(&U);
557   }
558 
559   /// Helper function to accumulate uses.
560   void followUsesInContext(Attributor &A,
561                            MustBeExecutedContextExplorer &Explorer,
562                            const Instruction *CtxI,
563                            SetVector<const Use *> &Uses, StateType &State) {
564     auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
565     for (unsigned u = 0; u < Uses.size(); ++u) {
566       const Use *U = Uses[u];
567       if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
568         bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
569         if (Found && Base::followUse(A, U, UserI, State))
570           for (const Use &Us : UserI->uses())
571             Uses.insert(&Us);
572       }
573     }
574   }
575 
576   /// See AbstractAttribute::updateImpl(...).
577   ChangeStatus updateImpl(Attributor &A) override {
578     auto BeforeState = this->getState();
579     auto &S = this->getState();
580     Instruction *CtxI = this->getIRPosition().getCtxI();
581     if (!CtxI)
582       return ChangeStatus::UNCHANGED;
583 
584     MustBeExecutedContextExplorer &Explorer =
585         A.getInfoCache().getMustBeExecutedContextExplorer();
586 
587     followUsesInContext(A, Explorer, CtxI, Uses, S);
588 
589     if (this->isAtFixpoint())
590       return ChangeStatus::CHANGED;
591 
592     SmallVector<const BranchInst *, 4> BrInsts;
593     auto Pred = [&](const Instruction *I) {
594       if (const BranchInst *Br = dyn_cast<BranchInst>(I))
595         if (Br->isConditional())
596           BrInsts.push_back(Br);
597       return true;
598     };
599 
600     // Here, accumulate conditional branch instructions in the context. We
601     // explore the child paths and collect the known states. The disjunction of
602     // those states can be merged to its own state. Let ParentState_i be a state
603     // to indicate the known information for an i-th branch instruction in the
604     // context. ChildStates are created for its successors respectively.
605     //
606     // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
607     // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
608     //      ...
609     // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
610     //
611     // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
612     //
613     // FIXME: Currently, recursive branches are not handled. For example, we
614     // can't deduce that ptr must be dereferenced in below function.
615     //
616     // void f(int a, int c, int *ptr) {
617     //    if(a)
618     //      if (b) {
619     //        *ptr = 0;
620     //      } else {
621     //        *ptr = 1;
622     //      }
623     //    else {
624     //      if (b) {
625     //        *ptr = 0;
626     //      } else {
627     //        *ptr = 1;
628     //      }
629     //    }
630     // }
631 
632     Explorer.checkForAllContext(CtxI, Pred);
633     for (const BranchInst *Br : BrInsts) {
634       StateType ParentState;
635 
636       // The known state of the parent state is a conjunction of children's
637       // known states so it is initialized with a best state.
638       ParentState.indicateOptimisticFixpoint();
639 
640       for (const BasicBlock *BB : Br->successors()) {
641         StateType ChildState;
642 
643         size_t BeforeSize = Uses.size();
644         followUsesInContext(A, Explorer, &BB->front(), Uses, ChildState);
645 
646         // Erase uses which only appear in the child.
647         for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
648           It = Uses.erase(It);
649 
650         ParentState &= ChildState;
651       }
652 
653       // Use only known state.
654       S += ParentState;
655     }
656 
657     return BeforeState == S ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
658   }
659 
660 private:
661   /// Container for (transitive) uses of the associated value.
662   SetVector<const Use *> Uses;
663 };
664 
665 template <typename AAType, typename Base,
666           typename StateType = typename AAType::StateType>
667 using AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext =
668     AAComposeTwoGenericDeduction<AAType, Base, StateType,
669                                  AAFromMustBeExecutedContext,
670                                  AAArgumentFromCallSiteArguments>;
671 
672 template <typename AAType, typename Base,
673           typename StateType = typename AAType::StateType>
674 using AACallSiteReturnedFromReturnedAndMustBeExecutedContext =
675     AAComposeTwoGenericDeduction<AAType, Base, StateType,
676                                  AAFromMustBeExecutedContext,
677                                  AACallSiteReturnedFromReturned>;
678 
679 /// -----------------------NoUnwind Function Attribute--------------------------
680 
681 struct AANoUnwindImpl : AANoUnwind {
682   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
683 
684   const std::string getAsStr() const override {
685     return getAssumed() ? "nounwind" : "may-unwind";
686   }
687 
688   /// See AbstractAttribute::updateImpl(...).
689   ChangeStatus updateImpl(Attributor &A) override {
690     auto Opcodes = {
691         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
692         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
693         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
694 
695     auto CheckForNoUnwind = [&](Instruction &I) {
696       if (!I.mayThrow())
697         return true;
698 
699       if (const auto *CB = dyn_cast<CallBase>(&I)) {
700         const auto &NoUnwindAA =
701             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
702         return NoUnwindAA.isAssumedNoUnwind();
703       }
704       return false;
705     };
706 
707     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
708       return indicatePessimisticFixpoint();
709 
710     return ChangeStatus::UNCHANGED;
711   }
712 };
713 
714 struct AANoUnwindFunction final : public AANoUnwindImpl {
715   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
716       : AANoUnwindImpl(IRP, A) {}
717 
718   /// See AbstractAttribute::trackStatistics()
719   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
720 };
721 
722 /// NoUnwind attribute deduction for a call sites.
723 struct AANoUnwindCallSite final : AANoUnwindImpl {
724   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
725       : AANoUnwindImpl(IRP, A) {}
726 
727   /// See AbstractAttribute::initialize(...).
728   void initialize(Attributor &A) override {
729     AANoUnwindImpl::initialize(A);
730     Function *F = getAssociatedFunction();
731     if (!F)
732       indicatePessimisticFixpoint();
733   }
734 
735   /// See AbstractAttribute::updateImpl(...).
736   ChangeStatus updateImpl(Attributor &A) override {
737     // TODO: Once we have call site specific value information we can provide
738     //       call site specific liveness information and then it makes
739     //       sense to specialize attributes for call sites arguments instead of
740     //       redirecting requests to the callee argument.
741     Function *F = getAssociatedFunction();
742     const IRPosition &FnPos = IRPosition::function(*F);
743     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
744     return clampStateAndIndicateChange(
745         getState(),
746         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
747   }
748 
749   /// See AbstractAttribute::trackStatistics()
750   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
751 };
752 
753 /// --------------------- Function Return Values -------------------------------
754 
755 /// "Attribute" that collects all potential returned values and the return
756 /// instructions that they arise from.
757 ///
758 /// If there is a unique returned value R, the manifest method will:
759 ///   - mark R with the "returned" attribute, if R is an argument.
760 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
761 
762   /// Mapping of values potentially returned by the associated function to the
763   /// return instructions that might return them.
764   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
765 
766   /// Mapping to remember the number of returned values for a call site such
767   /// that we can avoid updates if nothing changed.
768   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
769 
770   /// Set of unresolved calls returned by the associated function.
771   SmallSetVector<CallBase *, 4> UnresolvedCalls;
772 
773   /// State flags
774   ///
775   ///{
776   bool IsFixed = false;
777   bool IsValidState = true;
778   ///}
779 
780 public:
781   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
782       : AAReturnedValues(IRP, A) {}
783 
784   /// See AbstractAttribute::initialize(...).
785   void initialize(Attributor &A) override {
786     // Reset the state.
787     IsFixed = false;
788     IsValidState = true;
789     ReturnedValues.clear();
790 
791     Function *F = getAssociatedFunction();
792     if (!F) {
793       indicatePessimisticFixpoint();
794       return;
795     }
796     assert(!F->getReturnType()->isVoidTy() &&
797            "Did not expect a void return type!");
798 
799     // The map from instruction opcodes to those instructions in the function.
800     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
801 
802     // Look through all arguments, if one is marked as returned we are done.
803     for (Argument &Arg : F->args()) {
804       if (Arg.hasReturnedAttr()) {
805         auto &ReturnInstSet = ReturnedValues[&Arg];
806         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
807           for (Instruction *RI : *Insts)
808             ReturnInstSet.insert(cast<ReturnInst>(RI));
809 
810         indicateOptimisticFixpoint();
811         return;
812       }
813     }
814 
815     if (!A.isFunctionIPOAmendable(*F))
816       indicatePessimisticFixpoint();
817   }
818 
819   /// See AbstractAttribute::manifest(...).
820   ChangeStatus manifest(Attributor &A) override;
821 
822   /// See AbstractAttribute::getState(...).
823   AbstractState &getState() override { return *this; }
824 
825   /// See AbstractAttribute::getState(...).
826   const AbstractState &getState() const override { return *this; }
827 
828   /// See AbstractAttribute::updateImpl(Attributor &A).
829   ChangeStatus updateImpl(Attributor &A) override;
830 
831   llvm::iterator_range<iterator> returned_values() override {
832     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
833   }
834 
835   llvm::iterator_range<const_iterator> returned_values() const override {
836     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
837   }
838 
839   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
840     return UnresolvedCalls;
841   }
842 
843   /// Return the number of potential return values, -1 if unknown.
844   size_t getNumReturnValues() const override {
845     return isValidState() ? ReturnedValues.size() : -1;
846   }
847 
848   /// Return an assumed unique return value if a single candidate is found. If
849   /// there cannot be one, return a nullptr. If it is not clear yet, return the
850   /// Optional::NoneType.
851   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
852 
853   /// See AbstractState::checkForAllReturnedValues(...).
854   bool checkForAllReturnedValuesAndReturnInsts(
855       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
856       const override;
857 
858   /// Pretty print the attribute similar to the IR representation.
859   const std::string getAsStr() const override;
860 
861   /// See AbstractState::isAtFixpoint().
862   bool isAtFixpoint() const override { return IsFixed; }
863 
864   /// See AbstractState::isValidState().
865   bool isValidState() const override { return IsValidState; }
866 
867   /// See AbstractState::indicateOptimisticFixpoint(...).
868   ChangeStatus indicateOptimisticFixpoint() override {
869     IsFixed = true;
870     return ChangeStatus::UNCHANGED;
871   }
872 
873   ChangeStatus indicatePessimisticFixpoint() override {
874     IsFixed = true;
875     IsValidState = false;
876     return ChangeStatus::CHANGED;
877   }
878 };
879 
880 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
881   ChangeStatus Changed = ChangeStatus::UNCHANGED;
882 
883   // Bookkeeping.
884   assert(isValidState());
885   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
886                   "Number of function with known return values");
887 
888   // Check if we have an assumed unique return value that we could manifest.
889   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
890 
891   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
892     return Changed;
893 
894   // Bookkeeping.
895   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
896                   "Number of function with unique return");
897 
898   // Callback to replace the uses of CB with the constant C.
899   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
900     if (CB.getNumUses() == 0)
901       return ChangeStatus::UNCHANGED;
902     if (A.changeValueAfterManifest(CB, C))
903       return ChangeStatus::CHANGED;
904     return ChangeStatus::UNCHANGED;
905   };
906 
907   // If the assumed unique return value is an argument, annotate it.
908   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
909     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
910             getAssociatedFunction()->getReturnType())) {
911       // TODO: This should be handled differently!
912       this->AnchorVal = UniqueRVArg;
913       this->KindOrArgNo = UniqueRVArg->getArgNo();
914       Changed = IRAttribute::manifest(A);
915     }
916   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
917     // We can replace the returned value with the unique returned constant.
918     Value &AnchorValue = getAnchorValue();
919     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
920       for (const Use &U : F->uses())
921         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
922           if (CB->isCallee(&U)) {
923             Constant *RVCCast =
924                 CB->getType() == RVC->getType()
925                     ? RVC
926                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
927             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
928           }
929     } else {
930       assert(isa<CallBase>(AnchorValue) &&
931              "Expcected a function or call base anchor!");
932       Constant *RVCCast =
933           AnchorValue.getType() == RVC->getType()
934               ? RVC
935               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
936       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
937     }
938     if (Changed == ChangeStatus::CHANGED)
939       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
940                       "Number of function returns replaced by constant return");
941   }
942 
943   return Changed;
944 }
945 
946 const std::string AAReturnedValuesImpl::getAsStr() const {
947   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
948          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
949          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
950 }
951 
952 Optional<Value *>
953 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
954   // If checkForAllReturnedValues provides a unique value, ignoring potential
955   // undef values that can also be present, it is assumed to be the actual
956   // return value and forwarded to the caller of this method. If there are
957   // multiple, a nullptr is returned indicating there cannot be a unique
958   // returned value.
959   Optional<Value *> UniqueRV;
960 
961   auto Pred = [&](Value &RV) -> bool {
962     // If we found a second returned value and neither the current nor the saved
963     // one is an undef, there is no unique returned value. Undefs are special
964     // since we can pretend they have any value.
965     if (UniqueRV.hasValue() && UniqueRV != &RV &&
966         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
967       UniqueRV = nullptr;
968       return false;
969     }
970 
971     // Do not overwrite a value with an undef.
972     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
973       UniqueRV = &RV;
974 
975     return true;
976   };
977 
978   if (!A.checkForAllReturnedValues(Pred, *this))
979     UniqueRV = nullptr;
980 
981   return UniqueRV;
982 }
983 
984 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
985     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
986     const {
987   if (!isValidState())
988     return false;
989 
990   // Check all returned values but ignore call sites as long as we have not
991   // encountered an overdefined one during an update.
992   for (auto &It : ReturnedValues) {
993     Value *RV = It.first;
994 
995     CallBase *CB = dyn_cast<CallBase>(RV);
996     if (CB && !UnresolvedCalls.count(CB))
997       continue;
998 
999     if (!Pred(*RV, It.second))
1000       return false;
1001   }
1002 
1003   return true;
1004 }
1005 
1006 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1007   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1008   bool Changed = false;
1009 
1010   // State used in the value traversals starting in returned values.
1011   struct RVState {
1012     // The map in which we collect return values -> return instrs.
1013     decltype(ReturnedValues) &RetValsMap;
1014     // The flag to indicate a change.
1015     bool &Changed;
1016     // The return instrs we come from.
1017     SmallSetVector<ReturnInst *, 4> RetInsts;
1018   };
1019 
1020   // Callback for a leaf value returned by the associated function.
1021   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1022                          bool) -> bool {
1023     auto Size = RVS.RetValsMap[&Val].size();
1024     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1025     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1026     RVS.Changed |= Inserted;
1027     LLVM_DEBUG({
1028       if (Inserted)
1029         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1030                << " => " << RVS.RetInsts.size() << "\n";
1031     });
1032     return true;
1033   };
1034 
1035   // Helper method to invoke the generic value traversal.
1036   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1037                                 const Instruction *CtxI) {
1038     IRPosition RetValPos = IRPosition::value(RV);
1039     return genericValueTraversal<AAReturnedValues, RVState>(
1040         A, RetValPos, *this, RVS, VisitValueCB, CtxI);
1041   };
1042 
1043   // Callback for all "return intructions" live in the associated function.
1044   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1045     ReturnInst &Ret = cast<ReturnInst>(I);
1046     RVState RVS({ReturnedValues, Changed, {}});
1047     RVS.RetInsts.insert(&Ret);
1048     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1049   };
1050 
1051   // Start by discovering returned values from all live returned instructions in
1052   // the associated function.
1053   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1054     return indicatePessimisticFixpoint();
1055 
1056   // Once returned values "directly" present in the code are handled we try to
1057   // resolve returned calls.
1058   decltype(ReturnedValues) NewRVsMap;
1059   for (auto &It : ReturnedValues) {
1060     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1061                       << " by #" << It.second.size() << " RIs\n");
1062     CallBase *CB = dyn_cast<CallBase>(It.first);
1063     if (!CB || UnresolvedCalls.count(CB))
1064       continue;
1065 
1066     if (!CB->getCalledFunction()) {
1067       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1068                         << "\n");
1069       UnresolvedCalls.insert(CB);
1070       continue;
1071     }
1072 
1073     // TODO: use the function scope once we have call site AAReturnedValues.
1074     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1075         *this, IRPosition::function(*CB->getCalledFunction()));
1076     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1077                       << RetValAA << "\n");
1078 
1079     // Skip dead ends, thus if we do not know anything about the returned
1080     // call we mark it as unresolved and it will stay that way.
1081     if (!RetValAA.getState().isValidState()) {
1082       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1083                         << "\n");
1084       UnresolvedCalls.insert(CB);
1085       continue;
1086     }
1087 
1088     // Do not try to learn partial information. If the callee has unresolved
1089     // return values we will treat the call as unresolved/opaque.
1090     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1091     if (!RetValAAUnresolvedCalls.empty()) {
1092       UnresolvedCalls.insert(CB);
1093       continue;
1094     }
1095 
1096     // Now check if we can track transitively returned values. If possible, thus
1097     // if all return value can be represented in the current scope, do so.
1098     bool Unresolved = false;
1099     for (auto &RetValAAIt : RetValAA.returned_values()) {
1100       Value *RetVal = RetValAAIt.first;
1101       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1102           isa<Constant>(RetVal))
1103         continue;
1104       // Anything that did not fit in the above categories cannot be resolved,
1105       // mark the call as unresolved.
1106       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1107                            "cannot be translated: "
1108                         << *RetVal << "\n");
1109       UnresolvedCalls.insert(CB);
1110       Unresolved = true;
1111       break;
1112     }
1113 
1114     if (Unresolved)
1115       continue;
1116 
1117     // Now track transitively returned values.
1118     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1119     if (NumRetAA == RetValAA.getNumReturnValues()) {
1120       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1121                            "changed since it was seen last\n");
1122       continue;
1123     }
1124     NumRetAA = RetValAA.getNumReturnValues();
1125 
1126     for (auto &RetValAAIt : RetValAA.returned_values()) {
1127       Value *RetVal = RetValAAIt.first;
1128       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1129         // Arguments are mapped to call site operands and we begin the traversal
1130         // again.
1131         bool Unused = false;
1132         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1133         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1134         continue;
1135       } else if (isa<CallBase>(RetVal)) {
1136         // Call sites are resolved by the callee attribute over time, no need to
1137         // do anything for us.
1138         continue;
1139       } else if (isa<Constant>(RetVal)) {
1140         // Constants are valid everywhere, we can simply take them.
1141         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1142         continue;
1143       }
1144     }
1145   }
1146 
1147   // To avoid modifications to the ReturnedValues map while we iterate over it
1148   // we kept record of potential new entries in a copy map, NewRVsMap.
1149   for (auto &It : NewRVsMap) {
1150     assert(!It.second.empty() && "Entry does not add anything.");
1151     auto &ReturnInsts = ReturnedValues[It.first];
1152     for (ReturnInst *RI : It.second)
1153       if (ReturnInsts.insert(RI)) {
1154         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1155                           << *It.first << " => " << *RI << "\n");
1156         Changed = true;
1157       }
1158   }
1159 
1160   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1161   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1162 }
1163 
1164 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1165   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1166       : AAReturnedValuesImpl(IRP, A) {}
1167 
1168   /// See AbstractAttribute::trackStatistics()
1169   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1170 };
1171 
1172 /// Returned values information for a call sites.
1173 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1174   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1175       : AAReturnedValuesImpl(IRP, A) {}
1176 
1177   /// See AbstractAttribute::initialize(...).
1178   void initialize(Attributor &A) override {
1179     // TODO: Once we have call site specific value information we can provide
1180     //       call site specific liveness information and then it makes
1181     //       sense to specialize attributes for call sites instead of
1182     //       redirecting requests to the callee.
1183     llvm_unreachable("Abstract attributes for returned values are not "
1184                      "supported for call sites yet!");
1185   }
1186 
1187   /// See AbstractAttribute::updateImpl(...).
1188   ChangeStatus updateImpl(Attributor &A) override {
1189     return indicatePessimisticFixpoint();
1190   }
1191 
1192   /// See AbstractAttribute::trackStatistics()
1193   void trackStatistics() const override {}
1194 };
1195 
1196 /// ------------------------ NoSync Function Attribute -------------------------
1197 
1198 struct AANoSyncImpl : AANoSync {
1199   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1200 
1201   const std::string getAsStr() const override {
1202     return getAssumed() ? "nosync" : "may-sync";
1203   }
1204 
1205   /// See AbstractAttribute::updateImpl(...).
1206   ChangeStatus updateImpl(Attributor &A) override;
1207 
1208   /// Helper function used to determine whether an instruction is non-relaxed
1209   /// atomic. In other words, if an atomic instruction does not have unordered
1210   /// or monotonic ordering
1211   static bool isNonRelaxedAtomic(Instruction *I);
1212 
1213   /// Helper function used to determine whether an instruction is volatile.
1214   static bool isVolatile(Instruction *I);
1215 
1216   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1217   /// memset).
1218   static bool isNoSyncIntrinsic(Instruction *I);
1219 };
1220 
1221 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1222   if (!I->isAtomic())
1223     return false;
1224 
1225   AtomicOrdering Ordering;
1226   switch (I->getOpcode()) {
1227   case Instruction::AtomicRMW:
1228     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1229     break;
1230   case Instruction::Store:
1231     Ordering = cast<StoreInst>(I)->getOrdering();
1232     break;
1233   case Instruction::Load:
1234     Ordering = cast<LoadInst>(I)->getOrdering();
1235     break;
1236   case Instruction::Fence: {
1237     auto *FI = cast<FenceInst>(I);
1238     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1239       return false;
1240     Ordering = FI->getOrdering();
1241     break;
1242   }
1243   case Instruction::AtomicCmpXchg: {
1244     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1245     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1246     // Only if both are relaxed, than it can be treated as relaxed.
1247     // Otherwise it is non-relaxed.
1248     if (Success != AtomicOrdering::Unordered &&
1249         Success != AtomicOrdering::Monotonic)
1250       return true;
1251     if (Failure != AtomicOrdering::Unordered &&
1252         Failure != AtomicOrdering::Monotonic)
1253       return true;
1254     return false;
1255   }
1256   default:
1257     llvm_unreachable(
1258         "New atomic operations need to be known in the attributor.");
1259   }
1260 
1261   // Relaxed.
1262   if (Ordering == AtomicOrdering::Unordered ||
1263       Ordering == AtomicOrdering::Monotonic)
1264     return false;
1265   return true;
1266 }
1267 
1268 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1269 /// FIXME: We should ipmrove the handling of intrinsics.
1270 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1271   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1272     switch (II->getIntrinsicID()) {
1273     /// Element wise atomic memory intrinsics are can only be unordered,
1274     /// therefore nosync.
1275     case Intrinsic::memset_element_unordered_atomic:
1276     case Intrinsic::memmove_element_unordered_atomic:
1277     case Intrinsic::memcpy_element_unordered_atomic:
1278       return true;
1279     case Intrinsic::memset:
1280     case Intrinsic::memmove:
1281     case Intrinsic::memcpy:
1282       if (!cast<MemIntrinsic>(II)->isVolatile())
1283         return true;
1284       return false;
1285     default:
1286       return false;
1287     }
1288   }
1289   return false;
1290 }
1291 
1292 bool AANoSyncImpl::isVolatile(Instruction *I) {
1293   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1294 
1295   switch (I->getOpcode()) {
1296   case Instruction::AtomicRMW:
1297     return cast<AtomicRMWInst>(I)->isVolatile();
1298   case Instruction::Store:
1299     return cast<StoreInst>(I)->isVolatile();
1300   case Instruction::Load:
1301     return cast<LoadInst>(I)->isVolatile();
1302   case Instruction::AtomicCmpXchg:
1303     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1304   default:
1305     return false;
1306   }
1307 }
1308 
1309 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1310 
1311   auto CheckRWInstForNoSync = [&](Instruction &I) {
1312     /// We are looking for volatile instructions or Non-Relaxed atomics.
1313     /// FIXME: We should improve the handling of intrinsics.
1314 
1315     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1316       return true;
1317 
1318     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1319       if (CB->hasFnAttr(Attribute::NoSync))
1320         return true;
1321 
1322       const auto &NoSyncAA =
1323           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
1324       if (NoSyncAA.isAssumedNoSync())
1325         return true;
1326       return false;
1327     }
1328 
1329     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1330       return true;
1331 
1332     return false;
1333   };
1334 
1335   auto CheckForNoSync = [&](Instruction &I) {
1336     // At this point we handled all read/write effects and they are all
1337     // nosync, so they can be skipped.
1338     if (I.mayReadOrWriteMemory())
1339       return true;
1340 
1341     // non-convergent and readnone imply nosync.
1342     return !cast<CallBase>(I).isConvergent();
1343   };
1344 
1345   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1346       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1347     return indicatePessimisticFixpoint();
1348 
1349   return ChangeStatus::UNCHANGED;
1350 }
1351 
1352 struct AANoSyncFunction final : public AANoSyncImpl {
1353   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1354       : AANoSyncImpl(IRP, A) {}
1355 
1356   /// See AbstractAttribute::trackStatistics()
1357   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1358 };
1359 
1360 /// NoSync attribute deduction for a call sites.
1361 struct AANoSyncCallSite final : AANoSyncImpl {
1362   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1363       : AANoSyncImpl(IRP, A) {}
1364 
1365   /// See AbstractAttribute::initialize(...).
1366   void initialize(Attributor &A) override {
1367     AANoSyncImpl::initialize(A);
1368     Function *F = getAssociatedFunction();
1369     if (!F)
1370       indicatePessimisticFixpoint();
1371   }
1372 
1373   /// See AbstractAttribute::updateImpl(...).
1374   ChangeStatus updateImpl(Attributor &A) override {
1375     // TODO: Once we have call site specific value information we can provide
1376     //       call site specific liveness information and then it makes
1377     //       sense to specialize attributes for call sites arguments instead of
1378     //       redirecting requests to the callee argument.
1379     Function *F = getAssociatedFunction();
1380     const IRPosition &FnPos = IRPosition::function(*F);
1381     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1382     return clampStateAndIndicateChange(
1383         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1384   }
1385 
1386   /// See AbstractAttribute::trackStatistics()
1387   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1388 };
1389 
1390 /// ------------------------ No-Free Attributes ----------------------------
1391 
1392 struct AANoFreeImpl : public AANoFree {
1393   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1394 
1395   /// See AbstractAttribute::updateImpl(...).
1396   ChangeStatus updateImpl(Attributor &A) override {
1397     auto CheckForNoFree = [&](Instruction &I) {
1398       const auto &CB = cast<CallBase>(I);
1399       if (CB.hasFnAttr(Attribute::NoFree))
1400         return true;
1401 
1402       const auto &NoFreeAA =
1403           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1404       return NoFreeAA.isAssumedNoFree();
1405     };
1406 
1407     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1408       return indicatePessimisticFixpoint();
1409     return ChangeStatus::UNCHANGED;
1410   }
1411 
1412   /// See AbstractAttribute::getAsStr().
1413   const std::string getAsStr() const override {
1414     return getAssumed() ? "nofree" : "may-free";
1415   }
1416 };
1417 
1418 struct AANoFreeFunction final : public AANoFreeImpl {
1419   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1420       : AANoFreeImpl(IRP, A) {}
1421 
1422   /// See AbstractAttribute::trackStatistics()
1423   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1424 };
1425 
1426 /// NoFree attribute deduction for a call sites.
1427 struct AANoFreeCallSite final : AANoFreeImpl {
1428   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1429       : AANoFreeImpl(IRP, A) {}
1430 
1431   /// See AbstractAttribute::initialize(...).
1432   void initialize(Attributor &A) override {
1433     AANoFreeImpl::initialize(A);
1434     Function *F = getAssociatedFunction();
1435     if (!F)
1436       indicatePessimisticFixpoint();
1437   }
1438 
1439   /// See AbstractAttribute::updateImpl(...).
1440   ChangeStatus updateImpl(Attributor &A) override {
1441     // TODO: Once we have call site specific value information we can provide
1442     //       call site specific liveness information and then it makes
1443     //       sense to specialize attributes for call sites arguments instead of
1444     //       redirecting requests to the callee argument.
1445     Function *F = getAssociatedFunction();
1446     const IRPosition &FnPos = IRPosition::function(*F);
1447     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1448     return clampStateAndIndicateChange(
1449         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1450   }
1451 
1452   /// See AbstractAttribute::trackStatistics()
1453   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1454 };
1455 
1456 /// NoFree attribute for floating values.
1457 struct AANoFreeFloating : AANoFreeImpl {
1458   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1459       : AANoFreeImpl(IRP, A) {}
1460 
1461   /// See AbstractAttribute::trackStatistics()
1462   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1463 
1464   /// See Abstract Attribute::updateImpl(...).
1465   ChangeStatus updateImpl(Attributor &A) override {
1466     const IRPosition &IRP = getIRPosition();
1467 
1468     const auto &NoFreeAA =
1469         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1470     if (NoFreeAA.isAssumedNoFree())
1471       return ChangeStatus::UNCHANGED;
1472 
1473     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1474     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1475       Instruction *UserI = cast<Instruction>(U.getUser());
1476       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1477         if (CB->isBundleOperand(&U))
1478           return false;
1479         if (!CB->isArgOperand(&U))
1480           return true;
1481         unsigned ArgNo = CB->getArgOperandNo(&U);
1482 
1483         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1484             *this, IRPosition::callsite_argument(*CB, ArgNo));
1485         return NoFreeArg.isAssumedNoFree();
1486       }
1487 
1488       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1489           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1490         Follow = true;
1491         return true;
1492       }
1493       if (isa<ReturnInst>(UserI))
1494         return true;
1495 
1496       // Unknown user.
1497       return false;
1498     };
1499     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1500       return indicatePessimisticFixpoint();
1501 
1502     return ChangeStatus::UNCHANGED;
1503   }
1504 };
1505 
1506 /// NoFree attribute for a call site argument.
1507 struct AANoFreeArgument final : AANoFreeFloating {
1508   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1509       : AANoFreeFloating(IRP, A) {}
1510 
1511   /// See AbstractAttribute::trackStatistics()
1512   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1513 };
1514 
1515 /// NoFree attribute for call site arguments.
1516 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1517   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1518       : AANoFreeFloating(IRP, A) {}
1519 
1520   /// See AbstractAttribute::updateImpl(...).
1521   ChangeStatus updateImpl(Attributor &A) override {
1522     // TODO: Once we have call site specific value information we can provide
1523     //       call site specific liveness information and then it makes
1524     //       sense to specialize attributes for call sites arguments instead of
1525     //       redirecting requests to the callee argument.
1526     Argument *Arg = getAssociatedArgument();
1527     if (!Arg)
1528       return indicatePessimisticFixpoint();
1529     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1530     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1531     return clampStateAndIndicateChange(
1532         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1533   }
1534 
1535   /// See AbstractAttribute::trackStatistics()
1536   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1537 };
1538 
1539 /// NoFree attribute for function return value.
1540 struct AANoFreeReturned final : AANoFreeFloating {
1541   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1542       : AANoFreeFloating(IRP, A) {
1543     llvm_unreachable("NoFree is not applicable to function returns!");
1544   }
1545 
1546   /// See AbstractAttribute::initialize(...).
1547   void initialize(Attributor &A) override {
1548     llvm_unreachable("NoFree is not applicable to function returns!");
1549   }
1550 
1551   /// See AbstractAttribute::updateImpl(...).
1552   ChangeStatus updateImpl(Attributor &A) override {
1553     llvm_unreachable("NoFree is not applicable to function returns!");
1554   }
1555 
1556   /// See AbstractAttribute::trackStatistics()
1557   void trackStatistics() const override {}
1558 };
1559 
1560 /// NoFree attribute deduction for a call site return value.
1561 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1562   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1563       : AANoFreeFloating(IRP, A) {}
1564 
1565   ChangeStatus manifest(Attributor &A) override {
1566     return ChangeStatus::UNCHANGED;
1567   }
1568   /// See AbstractAttribute::trackStatistics()
1569   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1570 };
1571 
1572 /// ------------------------ NonNull Argument Attribute ------------------------
1573 static int64_t getKnownNonNullAndDerefBytesForUse(
1574     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1575     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1576   TrackUse = false;
1577 
1578   const Value *UseV = U->get();
1579   if (!UseV->getType()->isPointerTy())
1580     return 0;
1581 
1582   Type *PtrTy = UseV->getType();
1583   const Function *F = I->getFunction();
1584   bool NullPointerIsDefined =
1585       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1586   const DataLayout &DL = A.getInfoCache().getDL();
1587   if (const auto *CB = dyn_cast<CallBase>(I)) {
1588     if (CB->isBundleOperand(U)) {
1589       if (RetainedKnowledge RK = getKnowledgeFromUse(
1590               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1591         IsNonNull |=
1592             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1593         return RK.ArgValue;
1594       }
1595       return 0;
1596     }
1597 
1598     if (CB->isCallee(U)) {
1599       IsNonNull |= !NullPointerIsDefined;
1600       return 0;
1601     }
1602 
1603     unsigned ArgNo = CB->getArgOperandNo(U);
1604     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1605     // As long as we only use known information there is no need to track
1606     // dependences here.
1607     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1608                                                   /* TrackDependence */ false);
1609     IsNonNull |= DerefAA.isKnownNonNull();
1610     return DerefAA.getKnownDereferenceableBytes();
1611   }
1612 
1613   // We need to follow common pointer manipulation uses to the accesses they
1614   // feed into. We can try to be smart to avoid looking through things we do not
1615   // like for now, e.g., non-inbounds GEPs.
1616   if (isa<CastInst>(I)) {
1617     TrackUse = true;
1618     return 0;
1619   }
1620   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1621     if (GEP->hasAllConstantIndices()) {
1622       TrackUse = true;
1623       return 0;
1624     }
1625 
1626   int64_t Offset;
1627   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1628     if (Base == &AssociatedValue &&
1629         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1630       int64_t DerefBytes =
1631           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1632 
1633       IsNonNull |= !NullPointerIsDefined;
1634       return std::max(int64_t(0), DerefBytes);
1635     }
1636   }
1637 
1638   /// Corner case when an offset is 0.
1639   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1640           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1641     if (Offset == 0 && Base == &AssociatedValue &&
1642         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1643       int64_t DerefBytes =
1644           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1645       IsNonNull |= !NullPointerIsDefined;
1646       return std::max(int64_t(0), DerefBytes);
1647     }
1648   }
1649 
1650   return 0;
1651 }
1652 
1653 struct AANonNullImpl : AANonNull {
1654   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1655       : AANonNull(IRP, A),
1656         NullIsDefined(NullPointerIsDefined(
1657             getAnchorScope(),
1658             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1659 
1660   /// See AbstractAttribute::initialize(...).
1661   void initialize(Attributor &A) override {
1662     if (!NullIsDefined &&
1663         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1664                 /* IgnoreSubsumingPositions */ false, &A))
1665       indicateOptimisticFixpoint();
1666     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1667       indicatePessimisticFixpoint();
1668     else
1669       AANonNull::initialize(A);
1670   }
1671 
1672   /// See AAFromMustBeExecutedContext
1673   bool followUse(Attributor &A, const Use *U, const Instruction *I,
1674                  AANonNull::StateType &State) {
1675     bool IsNonNull = false;
1676     bool TrackUse = false;
1677     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1678                                        IsNonNull, TrackUse);
1679     State.setKnown(IsNonNull);
1680     return TrackUse;
1681   }
1682 
1683   /// See AbstractAttribute::getAsStr().
1684   const std::string getAsStr() const override {
1685     return getAssumed() ? "nonnull" : "may-null";
1686   }
1687 
1688   /// Flag to determine if the underlying value can be null and still allow
1689   /// valid accesses.
1690   const bool NullIsDefined;
1691 };
1692 
1693 /// NonNull attribute for a floating value.
1694 struct AANonNullFloating
1695     : AAFromMustBeExecutedContext<AANonNull, AANonNullImpl> {
1696   using Base = AAFromMustBeExecutedContext<AANonNull, AANonNullImpl>;
1697   AANonNullFloating(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
1698 
1699   /// See AbstractAttribute::updateImpl(...).
1700   ChangeStatus updateImpl(Attributor &A) override {
1701     ChangeStatus Change = Base::updateImpl(A);
1702     if (isKnownNonNull())
1703       return Change;
1704 
1705     if (!NullIsDefined) {
1706       const auto &DerefAA =
1707           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1708       if (DerefAA.getAssumedDereferenceableBytes())
1709         return Change;
1710     }
1711 
1712     const DataLayout &DL = A.getDataLayout();
1713 
1714     DominatorTree *DT = nullptr;
1715     AssumptionCache *AC = nullptr;
1716     InformationCache &InfoCache = A.getInfoCache();
1717     if (const Function *Fn = getAnchorScope()) {
1718       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1719       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1720     }
1721 
1722     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1723                             AANonNull::StateType &T, bool Stripped) -> bool {
1724       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1725       if (!Stripped && this == &AA) {
1726         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1727           T.indicatePessimisticFixpoint();
1728       } else {
1729         // Use abstract attribute information.
1730         const AANonNull::StateType &NS =
1731             static_cast<const AANonNull::StateType &>(AA.getState());
1732         T ^= NS;
1733       }
1734       return T.isValidState();
1735     };
1736 
1737     StateType T;
1738     if (!genericValueTraversal<AANonNull, StateType>(
1739             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1740       return indicatePessimisticFixpoint();
1741 
1742     return clampStateAndIndicateChange(getState(), T);
1743   }
1744 
1745   /// See AbstractAttribute::trackStatistics()
1746   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1747 };
1748 
1749 /// NonNull attribute for function return value.
1750 struct AANonNullReturned final
1751     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1752   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1753       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {}
1754 
1755   /// See AbstractAttribute::trackStatistics()
1756   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1757 };
1758 
1759 /// NonNull attribute for function argument.
1760 struct AANonNullArgument final
1761     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1762                                                               AANonNullImpl> {
1763   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1764       : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AANonNull,
1765                                                                 AANonNullImpl>(
1766             IRP, A) {}
1767 
1768   /// See AbstractAttribute::trackStatistics()
1769   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1770 };
1771 
1772 struct AANonNullCallSiteArgument final : AANonNullFloating {
1773   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1774       : AANonNullFloating(IRP, A) {}
1775 
1776   /// See AbstractAttribute::trackStatistics()
1777   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1778 };
1779 
1780 /// NonNull attribute for a call site return position.
1781 struct AANonNullCallSiteReturned final
1782     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1783                                                              AANonNullImpl> {
1784   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1785       : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AANonNull,
1786                                                                AANonNullImpl>(
1787             IRP, A) {}
1788 
1789   /// See AbstractAttribute::trackStatistics()
1790   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1791 };
1792 
1793 /// ------------------------ No-Recurse Attributes ----------------------------
1794 
1795 struct AANoRecurseImpl : public AANoRecurse {
1796   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1797 
1798   /// See AbstractAttribute::getAsStr()
1799   const std::string getAsStr() const override {
1800     return getAssumed() ? "norecurse" : "may-recurse";
1801   }
1802 };
1803 
1804 struct AANoRecurseFunction final : AANoRecurseImpl {
1805   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1806       : AANoRecurseImpl(IRP, A) {}
1807 
1808   /// See AbstractAttribute::initialize(...).
1809   void initialize(Attributor &A) override {
1810     AANoRecurseImpl::initialize(A);
1811     if (const Function *F = getAnchorScope())
1812       if (A.getInfoCache().getSccSize(*F) != 1)
1813         indicatePessimisticFixpoint();
1814   }
1815 
1816   /// See AbstractAttribute::updateImpl(...).
1817   ChangeStatus updateImpl(Attributor &A) override {
1818 
1819     // If all live call sites are known to be no-recurse, we are as well.
1820     auto CallSitePred = [&](AbstractCallSite ACS) {
1821       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1822           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1823           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1824       return NoRecurseAA.isKnownNoRecurse();
1825     };
1826     bool AllCallSitesKnown;
1827     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1828       // If we know all call sites and all are known no-recurse, we are done.
1829       // If all known call sites, which might not be all that exist, are known
1830       // to be no-recurse, we are not done but we can continue to assume
1831       // no-recurse. If one of the call sites we have not visited will become
1832       // live, another update is triggered.
1833       if (AllCallSitesKnown)
1834         indicateOptimisticFixpoint();
1835       return ChangeStatus::UNCHANGED;
1836     }
1837 
1838     // If the above check does not hold anymore we look at the calls.
1839     auto CheckForNoRecurse = [&](Instruction &I) {
1840       const auto &CB = cast<CallBase>(I);
1841       if (CB.hasFnAttr(Attribute::NoRecurse))
1842         return true;
1843 
1844       const auto &NoRecurseAA =
1845           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1846       if (!NoRecurseAA.isAssumedNoRecurse())
1847         return false;
1848 
1849       // Recursion to the same function
1850       if (CB.getCalledFunction() == getAnchorScope())
1851         return false;
1852 
1853       return true;
1854     };
1855 
1856     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1857       return indicatePessimisticFixpoint();
1858     return ChangeStatus::UNCHANGED;
1859   }
1860 
1861   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1862 };
1863 
1864 /// NoRecurse attribute deduction for a call sites.
1865 struct AANoRecurseCallSite final : AANoRecurseImpl {
1866   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1867       : AANoRecurseImpl(IRP, A) {}
1868 
1869   /// See AbstractAttribute::initialize(...).
1870   void initialize(Attributor &A) override {
1871     AANoRecurseImpl::initialize(A);
1872     Function *F = getAssociatedFunction();
1873     if (!F)
1874       indicatePessimisticFixpoint();
1875   }
1876 
1877   /// See AbstractAttribute::updateImpl(...).
1878   ChangeStatus updateImpl(Attributor &A) override {
1879     // TODO: Once we have call site specific value information we can provide
1880     //       call site specific liveness information and then it makes
1881     //       sense to specialize attributes for call sites arguments instead of
1882     //       redirecting requests to the callee argument.
1883     Function *F = getAssociatedFunction();
1884     const IRPosition &FnPos = IRPosition::function(*F);
1885     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1886     return clampStateAndIndicateChange(
1887         getState(),
1888         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1889   }
1890 
1891   /// See AbstractAttribute::trackStatistics()
1892   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1893 };
1894 
1895 /// -------------------- Undefined-Behavior Attributes ------------------------
1896 
1897 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1898   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1899       : AAUndefinedBehavior(IRP, A) {}
1900 
1901   /// See AbstractAttribute::updateImpl(...).
1902   // through a pointer (i.e. also branches etc.)
1903   ChangeStatus updateImpl(Attributor &A) override {
1904     const size_t UBPrevSize = KnownUBInsts.size();
1905     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1906 
1907     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1908       // Skip instructions that are already saved.
1909       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1910         return true;
1911 
1912       // If we reach here, we know we have an instruction
1913       // that accesses memory through a pointer operand,
1914       // for which getPointerOperand() should give it to us.
1915       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1916       assert(PtrOp &&
1917              "Expected pointer operand of memory accessing instruction");
1918 
1919       // Either we stopped and the appropriate action was taken,
1920       // or we got back a simplified value to continue.
1921       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1922       if (!SimplifiedPtrOp.hasValue())
1923         return true;
1924       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1925 
1926       // A memory access through a pointer is considered UB
1927       // only if the pointer has constant null value.
1928       // TODO: Expand it to not only check constant values.
1929       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1930         AssumedNoUBInsts.insert(&I);
1931         return true;
1932       }
1933       const Type *PtrTy = PtrOpVal->getType();
1934 
1935       // Because we only consider instructions inside functions,
1936       // assume that a parent function exists.
1937       const Function *F = I.getFunction();
1938 
1939       // A memory access using constant null pointer is only considered UB
1940       // if null pointer is _not_ defined for the target platform.
1941       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1942         AssumedNoUBInsts.insert(&I);
1943       else
1944         KnownUBInsts.insert(&I);
1945       return true;
1946     };
1947 
1948     auto InspectBrInstForUB = [&](Instruction &I) {
1949       // A conditional branch instruction is considered UB if it has `undef`
1950       // condition.
1951 
1952       // Skip instructions that are already saved.
1953       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1954         return true;
1955 
1956       // We know we have a branch instruction.
1957       auto BrInst = cast<BranchInst>(&I);
1958 
1959       // Unconditional branches are never considered UB.
1960       if (BrInst->isUnconditional())
1961         return true;
1962 
1963       // Either we stopped and the appropriate action was taken,
1964       // or we got back a simplified value to continue.
1965       Optional<Value *> SimplifiedCond =
1966           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1967       if (!SimplifiedCond.hasValue())
1968         return true;
1969       AssumedNoUBInsts.insert(&I);
1970       return true;
1971     };
1972 
1973     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
1974                               {Instruction::Load, Instruction::Store,
1975                                Instruction::AtomicCmpXchg,
1976                                Instruction::AtomicRMW},
1977                               /* CheckBBLivenessOnly */ true);
1978     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
1979                               /* CheckBBLivenessOnly */ true);
1980     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
1981         UBPrevSize != KnownUBInsts.size())
1982       return ChangeStatus::CHANGED;
1983     return ChangeStatus::UNCHANGED;
1984   }
1985 
1986   bool isKnownToCauseUB(Instruction *I) const override {
1987     return KnownUBInsts.count(I);
1988   }
1989 
1990   bool isAssumedToCauseUB(Instruction *I) const override {
1991     // In simple words, if an instruction is not in the assumed to _not_
1992     // cause UB, then it is assumed UB (that includes those
1993     // in the KnownUBInsts set). The rest is boilerplate
1994     // is to ensure that it is one of the instructions we test
1995     // for UB.
1996 
1997     switch (I->getOpcode()) {
1998     case Instruction::Load:
1999     case Instruction::Store:
2000     case Instruction::AtomicCmpXchg:
2001     case Instruction::AtomicRMW:
2002       return !AssumedNoUBInsts.count(I);
2003     case Instruction::Br: {
2004       auto BrInst = cast<BranchInst>(I);
2005       if (BrInst->isUnconditional())
2006         return false;
2007       return !AssumedNoUBInsts.count(I);
2008     } break;
2009     default:
2010       return false;
2011     }
2012     return false;
2013   }
2014 
2015   ChangeStatus manifest(Attributor &A) override {
2016     if (KnownUBInsts.empty())
2017       return ChangeStatus::UNCHANGED;
2018     for (Instruction *I : KnownUBInsts)
2019       A.changeToUnreachableAfterManifest(I);
2020     return ChangeStatus::CHANGED;
2021   }
2022 
2023   /// See AbstractAttribute::getAsStr()
2024   const std::string getAsStr() const override {
2025     return getAssumed() ? "undefined-behavior" : "no-ub";
2026   }
2027 
2028   /// Note: The correctness of this analysis depends on the fact that the
2029   /// following 2 sets will stop changing after some point.
2030   /// "Change" here means that their size changes.
2031   /// The size of each set is monotonically increasing
2032   /// (we only add items to them) and it is upper bounded by the number of
2033   /// instructions in the processed function (we can never save more
2034   /// elements in either set than this number). Hence, at some point,
2035   /// they will stop increasing.
2036   /// Consequently, at some point, both sets will have stopped
2037   /// changing, effectively making the analysis reach a fixpoint.
2038 
2039   /// Note: These 2 sets are disjoint and an instruction can be considered
2040   /// one of 3 things:
2041   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2042   ///    the KnownUBInsts set.
2043   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2044   ///    has a reason to assume it).
2045   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2046   ///    could not find a reason to assume or prove that it can cause UB,
2047   ///    hence it assumes it doesn't. We have a set for these instructions
2048   ///    so that we don't reprocess them in every update.
2049   ///    Note however that instructions in this set may cause UB.
2050 
2051 protected:
2052   /// A set of all live instructions _known_ to cause UB.
2053   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2054 
2055 private:
2056   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2057   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2058 
2059   // Should be called on updates in which if we're processing an instruction
2060   // \p I that depends on a value \p V, one of the following has to happen:
2061   // - If the value is assumed, then stop.
2062   // - If the value is known but undef, then consider it UB.
2063   // - Otherwise, do specific processing with the simplified value.
2064   // We return None in the first 2 cases to signify that an appropriate
2065   // action was taken and the caller should stop.
2066   // Otherwise, we return the simplified value that the caller should
2067   // use for specific processing.
2068   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2069                                          Instruction *I) {
2070     const auto &ValueSimplifyAA =
2071         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2072     Optional<Value *> SimplifiedV =
2073         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2074     if (!ValueSimplifyAA.isKnown()) {
2075       // Don't depend on assumed values.
2076       return llvm::None;
2077     }
2078     if (!SimplifiedV.hasValue()) {
2079       // If it is known (which we tested above) but it doesn't have a value,
2080       // then we can assume `undef` and hence the instruction is UB.
2081       KnownUBInsts.insert(I);
2082       return llvm::None;
2083     }
2084     Value *Val = SimplifiedV.getValue();
2085     if (isa<UndefValue>(Val)) {
2086       KnownUBInsts.insert(I);
2087       return llvm::None;
2088     }
2089     return Val;
2090   }
2091 };
2092 
2093 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2094   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2095       : AAUndefinedBehaviorImpl(IRP, A) {}
2096 
2097   /// See AbstractAttribute::trackStatistics()
2098   void trackStatistics() const override {
2099     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2100                "Number of instructions known to have UB");
2101     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2102         KnownUBInsts.size();
2103   }
2104 };
2105 
2106 /// ------------------------ Will-Return Attributes ----------------------------
2107 
2108 // Helper function that checks whether a function has any cycle which we don't
2109 // know if it is bounded or not.
2110 // Loops with maximum trip count are considered bounded, any other cycle not.
2111 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2112   ScalarEvolution *SE =
2113       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2114   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2115   // If either SCEV or LoopInfo is not available for the function then we assume
2116   // any cycle to be unbounded cycle.
2117   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2118   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2119   if (!SE || !LI) {
2120     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2121       if (SCCI.hasCycle())
2122         return true;
2123     return false;
2124   }
2125 
2126   // If there's irreducible control, the function may contain non-loop cycles.
2127   if (mayContainIrreducibleControl(F, LI))
2128     return true;
2129 
2130   // Any loop that does not have a max trip count is considered unbounded cycle.
2131   for (auto *L : LI->getLoopsInPreorder()) {
2132     if (!SE->getSmallConstantMaxTripCount(L))
2133       return true;
2134   }
2135   return false;
2136 }
2137 
2138 struct AAWillReturnImpl : public AAWillReturn {
2139   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2140       : AAWillReturn(IRP, A) {}
2141 
2142   /// See AbstractAttribute::initialize(...).
2143   void initialize(Attributor &A) override {
2144     AAWillReturn::initialize(A);
2145 
2146     Function *F = getAnchorScope();
2147     if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
2148       indicatePessimisticFixpoint();
2149   }
2150 
2151   /// See AbstractAttribute::updateImpl(...).
2152   ChangeStatus updateImpl(Attributor &A) override {
2153     auto CheckForWillReturn = [&](Instruction &I) {
2154       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2155       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2156       if (WillReturnAA.isKnownWillReturn())
2157         return true;
2158       if (!WillReturnAA.isAssumedWillReturn())
2159         return false;
2160       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2161       return NoRecurseAA.isAssumedNoRecurse();
2162     };
2163 
2164     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2165       return indicatePessimisticFixpoint();
2166 
2167     return ChangeStatus::UNCHANGED;
2168   }
2169 
2170   /// See AbstractAttribute::getAsStr()
2171   const std::string getAsStr() const override {
2172     return getAssumed() ? "willreturn" : "may-noreturn";
2173   }
2174 };
2175 
2176 struct AAWillReturnFunction final : AAWillReturnImpl {
2177   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2178       : AAWillReturnImpl(IRP, A) {}
2179 
2180   /// See AbstractAttribute::trackStatistics()
2181   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2182 };
2183 
2184 /// WillReturn attribute deduction for a call sites.
2185 struct AAWillReturnCallSite final : AAWillReturnImpl {
2186   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2187       : AAWillReturnImpl(IRP, A) {}
2188 
2189   /// See AbstractAttribute::initialize(...).
2190   void initialize(Attributor &A) override {
2191     AAWillReturnImpl::initialize(A);
2192     Function *F = getAssociatedFunction();
2193     if (!F)
2194       indicatePessimisticFixpoint();
2195   }
2196 
2197   /// See AbstractAttribute::updateImpl(...).
2198   ChangeStatus updateImpl(Attributor &A) override {
2199     // TODO: Once we have call site specific value information we can provide
2200     //       call site specific liveness information and then it makes
2201     //       sense to specialize attributes for call sites arguments instead of
2202     //       redirecting requests to the callee argument.
2203     Function *F = getAssociatedFunction();
2204     const IRPosition &FnPos = IRPosition::function(*F);
2205     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2206     return clampStateAndIndicateChange(
2207         getState(),
2208         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2209   }
2210 
2211   /// See AbstractAttribute::trackStatistics()
2212   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2213 };
2214 
2215 /// -------------------AAReachability Attribute--------------------------
2216 
2217 struct AAReachabilityImpl : AAReachability {
2218   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2219       : AAReachability(IRP, A) {}
2220 
2221   const std::string getAsStr() const override {
2222     // TODO: Return the number of reachable queries.
2223     return "reachable";
2224   }
2225 
2226   /// See AbstractAttribute::initialize(...).
2227   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2228 
2229   /// See AbstractAttribute::updateImpl(...).
2230   ChangeStatus updateImpl(Attributor &A) override {
2231     return indicatePessimisticFixpoint();
2232   }
2233 };
2234 
2235 struct AAReachabilityFunction final : public AAReachabilityImpl {
2236   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2237       : AAReachabilityImpl(IRP, A) {}
2238 
2239   /// See AbstractAttribute::trackStatistics()
2240   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2241 };
2242 
2243 /// ------------------------ NoAlias Argument Attribute ------------------------
2244 
2245 struct AANoAliasImpl : AANoAlias {
2246   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2247     assert(getAssociatedType()->isPointerTy() &&
2248            "Noalias is a pointer attribute");
2249   }
2250 
2251   const std::string getAsStr() const override {
2252     return getAssumed() ? "noalias" : "may-alias";
2253   }
2254 };
2255 
2256 /// NoAlias attribute for a floating value.
2257 struct AANoAliasFloating final : AANoAliasImpl {
2258   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2259       : AANoAliasImpl(IRP, A) {}
2260 
2261   /// See AbstractAttribute::initialize(...).
2262   void initialize(Attributor &A) override {
2263     AANoAliasImpl::initialize(A);
2264     Value *Val = &getAssociatedValue();
2265     do {
2266       CastInst *CI = dyn_cast<CastInst>(Val);
2267       if (!CI)
2268         break;
2269       Value *Base = CI->getOperand(0);
2270       if (Base->getNumUses() != 1)
2271         break;
2272       Val = Base;
2273     } while (true);
2274 
2275     if (!Val->getType()->isPointerTy()) {
2276       indicatePessimisticFixpoint();
2277       return;
2278     }
2279 
2280     if (isa<AllocaInst>(Val))
2281       indicateOptimisticFixpoint();
2282     else if (isa<ConstantPointerNull>(Val) &&
2283              !NullPointerIsDefined(getAnchorScope(),
2284                                    Val->getType()->getPointerAddressSpace()))
2285       indicateOptimisticFixpoint();
2286     else if (Val != &getAssociatedValue()) {
2287       const auto &ValNoAliasAA =
2288           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2289       if (ValNoAliasAA.isKnownNoAlias())
2290         indicateOptimisticFixpoint();
2291     }
2292   }
2293 
2294   /// See AbstractAttribute::updateImpl(...).
2295   ChangeStatus updateImpl(Attributor &A) override {
2296     // TODO: Implement this.
2297     return indicatePessimisticFixpoint();
2298   }
2299 
2300   /// See AbstractAttribute::trackStatistics()
2301   void trackStatistics() const override {
2302     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2303   }
2304 };
2305 
2306 /// NoAlias attribute for an argument.
2307 struct AANoAliasArgument final
2308     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2309   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2310   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2311 
2312   /// See AbstractAttribute::initialize(...).
2313   void initialize(Attributor &A) override {
2314     Base::initialize(A);
2315     // See callsite argument attribute and callee argument attribute.
2316     if (hasAttr({Attribute::ByVal}))
2317       indicateOptimisticFixpoint();
2318   }
2319 
2320   /// See AbstractAttribute::update(...).
2321   ChangeStatus updateImpl(Attributor &A) override {
2322     // We have to make sure no-alias on the argument does not break
2323     // synchronization when this is a callback argument, see also [1] below.
2324     // If synchronization cannot be affected, we delegate to the base updateImpl
2325     // function, otherwise we give up for now.
2326 
2327     // If the function is no-sync, no-alias cannot break synchronization.
2328     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2329         *this, IRPosition::function_scope(getIRPosition()));
2330     if (NoSyncAA.isAssumedNoSync())
2331       return Base::updateImpl(A);
2332 
2333     // If the argument is read-only, no-alias cannot break synchronization.
2334     const auto &MemBehaviorAA =
2335         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2336     if (MemBehaviorAA.isAssumedReadOnly())
2337       return Base::updateImpl(A);
2338 
2339     // If the argument is never passed through callbacks, no-alias cannot break
2340     // synchronization.
2341     bool AllCallSitesKnown;
2342     if (A.checkForAllCallSites(
2343             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2344             true, AllCallSitesKnown))
2345       return Base::updateImpl(A);
2346 
2347     // TODO: add no-alias but make sure it doesn't break synchronization by
2348     // introducing fake uses. See:
2349     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2350     //     International Workshop on OpenMP 2018,
2351     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2352 
2353     return indicatePessimisticFixpoint();
2354   }
2355 
2356   /// See AbstractAttribute::trackStatistics()
2357   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2358 };
2359 
2360 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2361   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2362       : AANoAliasImpl(IRP, A) {}
2363 
2364   /// See AbstractAttribute::initialize(...).
2365   void initialize(Attributor &A) override {
2366     // See callsite argument attribute and callee argument attribute.
2367     const auto &CB = cast<CallBase>(getAnchorValue());
2368     if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias))
2369       indicateOptimisticFixpoint();
2370     Value &Val = getAssociatedValue();
2371     if (isa<ConstantPointerNull>(Val) &&
2372         !NullPointerIsDefined(getAnchorScope(),
2373                               Val.getType()->getPointerAddressSpace()))
2374       indicateOptimisticFixpoint();
2375   }
2376 
2377   /// Determine if the underlying value may alias with the call site argument
2378   /// \p OtherArgNo of \p ICS (= the underlying call site).
2379   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2380                             const AAMemoryBehavior &MemBehaviorAA,
2381                             const CallBase &CB, unsigned OtherArgNo) {
2382     // We do not need to worry about aliasing with the underlying IRP.
2383     if (this->getArgNo() == (int)OtherArgNo)
2384       return false;
2385 
2386     // If it is not a pointer or pointer vector we do not alias.
2387     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2388     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2389       return false;
2390 
2391     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2392         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2393         /* TrackDependence */ false);
2394 
2395     // If the argument is readnone, there is no read-write aliasing.
2396     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2397       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2398       return false;
2399     }
2400 
2401     // If the argument is readonly and the underlying value is readonly, there
2402     // is no read-write aliasing.
2403     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2404     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2405       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2406       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2407       return false;
2408     }
2409 
2410     // We have to utilize actual alias analysis queries so we need the object.
2411     if (!AAR)
2412       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2413 
2414     // Try to rule it out at the call site.
2415     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2416     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2417                          "callsite arguments: "
2418                       << getAssociatedValue() << " " << *ArgOp << " => "
2419                       << (IsAliasing ? "" : "no-") << "alias \n");
2420 
2421     return IsAliasing;
2422   }
2423 
2424   bool
2425   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2426                                          const AAMemoryBehavior &MemBehaviorAA,
2427                                          const AANoAlias &NoAliasAA) {
2428     // We can deduce "noalias" if the following conditions hold.
2429     // (i)   Associated value is assumed to be noalias in the definition.
2430     // (ii)  Associated value is assumed to be no-capture in all the uses
2431     //       possibly executed before this callsite.
2432     // (iii) There is no other pointer argument which could alias with the
2433     //       value.
2434 
2435     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2436     if (!AssociatedValueIsNoAliasAtDef) {
2437       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2438                         << " is not no-alias at the definition\n");
2439       return false;
2440     }
2441 
2442     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2443 
2444     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2445     auto &NoCaptureAA =
2446         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2447     // Check whether the value is captured in the scope using AANoCapture.
2448     //      Look at CFG and check only uses possibly executed before this
2449     //      callsite.
2450     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2451       Instruction *UserI = cast<Instruction>(U.getUser());
2452 
2453       // If user if curr instr and only use.
2454       if ((UserI == getCtxI()) && (UserI->getNumUses() == 1))
2455         return true;
2456 
2457       const Function *ScopeFn = VIRP.getAnchorScope();
2458       if (ScopeFn) {
2459         const auto &ReachabilityAA =
2460             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2461 
2462         if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI()))
2463           return true;
2464 
2465         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2466           if (CB->isArgOperand(&U)) {
2467 
2468             unsigned ArgNo = CB->getArgOperandNo(&U);
2469 
2470             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2471                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2472 
2473             if (NoCaptureAA.isAssumedNoCapture())
2474               return true;
2475           }
2476         }
2477       }
2478 
2479       // For cases which can potentially have more users
2480       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2481           isa<SelectInst>(U)) {
2482         Follow = true;
2483         return true;
2484       }
2485 
2486       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2487       return false;
2488     };
2489 
2490     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2491       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2492         LLVM_DEBUG(
2493             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2494                    << " cannot be noalias as it is potentially captured\n");
2495         return false;
2496       }
2497     }
2498     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2499 
2500     // Check there is no other pointer argument which could alias with the
2501     // value passed at this call site.
2502     // TODO: AbstractCallSite
2503     const auto &CB = cast<CallBase>(getAnchorValue());
2504     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2505          OtherArgNo++)
2506       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2507         return false;
2508 
2509     return true;
2510   }
2511 
2512   /// See AbstractAttribute::updateImpl(...).
2513   ChangeStatus updateImpl(Attributor &A) override {
2514     // If the argument is readnone we are done as there are no accesses via the
2515     // argument.
2516     auto &MemBehaviorAA =
2517         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2518                                      /* TrackDependence */ false);
2519     if (MemBehaviorAA.isAssumedReadNone()) {
2520       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2521       return ChangeStatus::UNCHANGED;
2522     }
2523 
2524     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2525     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2526                                                   /* TrackDependence */ false);
2527 
2528     AAResults *AAR = nullptr;
2529     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2530                                                NoAliasAA)) {
2531       LLVM_DEBUG(
2532           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2533       return ChangeStatus::UNCHANGED;
2534     }
2535 
2536     return indicatePessimisticFixpoint();
2537   }
2538 
2539   /// See AbstractAttribute::trackStatistics()
2540   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2541 };
2542 
2543 /// NoAlias attribute for function return value.
2544 struct AANoAliasReturned final : AANoAliasImpl {
2545   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2546       : AANoAliasImpl(IRP, A) {}
2547 
2548   /// See AbstractAttribute::updateImpl(...).
2549   virtual ChangeStatus updateImpl(Attributor &A) override {
2550 
2551     auto CheckReturnValue = [&](Value &RV) -> bool {
2552       if (Constant *C = dyn_cast<Constant>(&RV))
2553         if (C->isNullValue() || isa<UndefValue>(C))
2554           return true;
2555 
2556       /// For now, we can only deduce noalias if we have call sites.
2557       /// FIXME: add more support.
2558       if (!isa<CallBase>(&RV))
2559         return false;
2560 
2561       const IRPosition &RVPos = IRPosition::value(RV);
2562       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2563       if (!NoAliasAA.isAssumedNoAlias())
2564         return false;
2565 
2566       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2567       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2568     };
2569 
2570     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2571       return indicatePessimisticFixpoint();
2572 
2573     return ChangeStatus::UNCHANGED;
2574   }
2575 
2576   /// See AbstractAttribute::trackStatistics()
2577   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2578 };
2579 
2580 /// NoAlias attribute deduction for a call site return value.
2581 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2582   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2583       : AANoAliasImpl(IRP, A) {}
2584 
2585   /// See AbstractAttribute::initialize(...).
2586   void initialize(Attributor &A) override {
2587     AANoAliasImpl::initialize(A);
2588     Function *F = getAssociatedFunction();
2589     if (!F)
2590       indicatePessimisticFixpoint();
2591   }
2592 
2593   /// See AbstractAttribute::updateImpl(...).
2594   ChangeStatus updateImpl(Attributor &A) override {
2595     // TODO: Once we have call site specific value information we can provide
2596     //       call site specific liveness information and then it makes
2597     //       sense to specialize attributes for call sites arguments instead of
2598     //       redirecting requests to the callee argument.
2599     Function *F = getAssociatedFunction();
2600     const IRPosition &FnPos = IRPosition::returned(*F);
2601     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2602     return clampStateAndIndicateChange(
2603         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2604   }
2605 
2606   /// See AbstractAttribute::trackStatistics()
2607   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2608 };
2609 
2610 /// -------------------AAIsDead Function Attribute-----------------------
2611 
2612 struct AAIsDeadValueImpl : public AAIsDead {
2613   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2614 
2615   /// See AAIsDead::isAssumedDead().
2616   bool isAssumedDead() const override { return getAssumed(); }
2617 
2618   /// See AAIsDead::isKnownDead().
2619   bool isKnownDead() const override { return getKnown(); }
2620 
2621   /// See AAIsDead::isAssumedDead(BasicBlock *).
2622   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2623 
2624   /// See AAIsDead::isKnownDead(BasicBlock *).
2625   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2626 
2627   /// See AAIsDead::isAssumedDead(Instruction *I).
2628   bool isAssumedDead(const Instruction *I) const override {
2629     return I == getCtxI() && isAssumedDead();
2630   }
2631 
2632   /// See AAIsDead::isKnownDead(Instruction *I).
2633   bool isKnownDead(const Instruction *I) const override {
2634     return isAssumedDead(I) && getKnown();
2635   }
2636 
2637   /// See AbstractAttribute::getAsStr().
2638   const std::string getAsStr() const override {
2639     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2640   }
2641 
2642   /// Check if all uses are assumed dead.
2643   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2644     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2645     // Explicitly set the dependence class to required because we want a long
2646     // chain of N dependent instructions to be considered live as soon as one is
2647     // without going through N update cycles. This is not required for
2648     // correctness.
2649     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2650   }
2651 
2652   /// Determine if \p I is assumed to be side-effect free.
2653   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2654     if (!I || wouldInstructionBeTriviallyDead(I))
2655       return true;
2656 
2657     auto *CB = dyn_cast<CallBase>(I);
2658     if (!CB || isa<IntrinsicInst>(CB))
2659       return false;
2660 
2661     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2662     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, CallIRP);
2663     if (!NoUnwindAA.isAssumedNoUnwind())
2664       return false;
2665 
2666     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, CallIRP);
2667     if (!MemBehaviorAA.isAssumedReadOnly())
2668       return false;
2669 
2670     return true;
2671   }
2672 };
2673 
2674 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2675   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2676       : AAIsDeadValueImpl(IRP, A) {}
2677 
2678   /// See AbstractAttribute::initialize(...).
2679   void initialize(Attributor &A) override {
2680     if (isa<UndefValue>(getAssociatedValue())) {
2681       indicatePessimisticFixpoint();
2682       return;
2683     }
2684 
2685     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2686     if (!isAssumedSideEffectFree(A, I))
2687       indicatePessimisticFixpoint();
2688   }
2689 
2690   /// See AbstractAttribute::updateImpl(...).
2691   ChangeStatus updateImpl(Attributor &A) override {
2692     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2693     if (!isAssumedSideEffectFree(A, I))
2694       return indicatePessimisticFixpoint();
2695 
2696     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2697       return indicatePessimisticFixpoint();
2698     return ChangeStatus::UNCHANGED;
2699   }
2700 
2701   /// See AbstractAttribute::manifest(...).
2702   ChangeStatus manifest(Attributor &A) override {
2703     Value &V = getAssociatedValue();
2704     if (auto *I = dyn_cast<Instruction>(&V)) {
2705       // If we get here we basically know the users are all dead. We check if
2706       // isAssumedSideEffectFree returns true here again because it might not be
2707       // the case and only the users are dead but the instruction (=call) is
2708       // still needed.
2709       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2710         A.deleteAfterManifest(*I);
2711         return ChangeStatus::CHANGED;
2712       }
2713     }
2714     if (V.use_empty())
2715       return ChangeStatus::UNCHANGED;
2716 
2717     bool UsedAssumedInformation = false;
2718     Optional<Constant *> C =
2719         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2720     if (C.hasValue() && C.getValue())
2721       return ChangeStatus::UNCHANGED;
2722 
2723     // Replace the value with undef as it is dead but keep droppable uses around
2724     // as they provide information we don't want to give up on just yet.
2725     UndefValue &UV = *UndefValue::get(V.getType());
2726     bool AnyChange =
2727         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2728     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2729   }
2730 
2731   /// See AbstractAttribute::trackStatistics()
2732   void trackStatistics() const override {
2733     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2734   }
2735 };
2736 
2737 struct AAIsDeadArgument : public AAIsDeadFloating {
2738   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2739       : AAIsDeadFloating(IRP, A) {}
2740 
2741   /// See AbstractAttribute::initialize(...).
2742   void initialize(Attributor &A) override {
2743     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2744       indicatePessimisticFixpoint();
2745   }
2746 
2747   /// See AbstractAttribute::manifest(...).
2748   ChangeStatus manifest(Attributor &A) override {
2749     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2750     Argument &Arg = *getAssociatedArgument();
2751     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2752       if (A.registerFunctionSignatureRewrite(
2753               Arg, /* ReplacementTypes */ {},
2754               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2755               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2756         Arg.dropDroppableUses();
2757         return ChangeStatus::CHANGED;
2758       }
2759     return Changed;
2760   }
2761 
2762   /// See AbstractAttribute::trackStatistics()
2763   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2764 };
2765 
2766 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2767   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2768       : AAIsDeadValueImpl(IRP, A) {}
2769 
2770   /// See AbstractAttribute::initialize(...).
2771   void initialize(Attributor &A) override {
2772     if (isa<UndefValue>(getAssociatedValue()))
2773       indicatePessimisticFixpoint();
2774   }
2775 
2776   /// See AbstractAttribute::updateImpl(...).
2777   ChangeStatus updateImpl(Attributor &A) override {
2778     // TODO: Once we have call site specific value information we can provide
2779     //       call site specific liveness information and then it makes
2780     //       sense to specialize attributes for call sites arguments instead of
2781     //       redirecting requests to the callee argument.
2782     Argument *Arg = getAssociatedArgument();
2783     if (!Arg)
2784       return indicatePessimisticFixpoint();
2785     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2786     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2787     return clampStateAndIndicateChange(
2788         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2789   }
2790 
2791   /// See AbstractAttribute::manifest(...).
2792   ChangeStatus manifest(Attributor &A) override {
2793     CallBase &CB = cast<CallBase>(getAnchorValue());
2794     Use &U = CB.getArgOperandUse(getArgNo());
2795     assert(!isa<UndefValue>(U.get()) &&
2796            "Expected undef values to be filtered out!");
2797     UndefValue &UV = *UndefValue::get(U->getType());
2798     if (A.changeUseAfterManifest(U, UV))
2799       return ChangeStatus::CHANGED;
2800     return ChangeStatus::UNCHANGED;
2801   }
2802 
2803   /// See AbstractAttribute::trackStatistics()
2804   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2805 };
2806 
2807 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2808   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2809       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2810 
2811   /// See AAIsDead::isAssumedDead().
2812   bool isAssumedDead() const override {
2813     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2814   }
2815 
2816   /// See AbstractAttribute::initialize(...).
2817   void initialize(Attributor &A) override {
2818     if (isa<UndefValue>(getAssociatedValue())) {
2819       indicatePessimisticFixpoint();
2820       return;
2821     }
2822 
2823     // We track this separately as a secondary state.
2824     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2825   }
2826 
2827   /// See AbstractAttribute::updateImpl(...).
2828   ChangeStatus updateImpl(Attributor &A) override {
2829     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2830     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2831       IsAssumedSideEffectFree = false;
2832       Changed = ChangeStatus::CHANGED;
2833     }
2834 
2835     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2836       return indicatePessimisticFixpoint();
2837     return Changed;
2838   }
2839 
2840   /// See AbstractAttribute::trackStatistics()
2841   void trackStatistics() const override {
2842     if (IsAssumedSideEffectFree)
2843       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2844     else
2845       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2846   }
2847 
2848   /// See AbstractAttribute::getAsStr().
2849   const std::string getAsStr() const override {
2850     return isAssumedDead()
2851                ? "assumed-dead"
2852                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2853   }
2854 
2855 private:
2856   bool IsAssumedSideEffectFree;
2857 };
2858 
2859 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2860   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
2861       : AAIsDeadValueImpl(IRP, A) {}
2862 
2863   /// See AbstractAttribute::updateImpl(...).
2864   ChangeStatus updateImpl(Attributor &A) override {
2865 
2866     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
2867                               {Instruction::Ret});
2868 
2869     auto PredForCallSite = [&](AbstractCallSite ACS) {
2870       if (ACS.isCallbackCall() || !ACS.getInstruction())
2871         return false;
2872       return areAllUsesAssumedDead(A, *ACS.getInstruction());
2873     };
2874 
2875     bool AllCallSitesKnown;
2876     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
2877                                 AllCallSitesKnown))
2878       return indicatePessimisticFixpoint();
2879 
2880     return ChangeStatus::UNCHANGED;
2881   }
2882 
2883   /// See AbstractAttribute::manifest(...).
2884   ChangeStatus manifest(Attributor &A) override {
2885     // TODO: Rewrite the signature to return void?
2886     bool AnyChange = false;
2887     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2888     auto RetInstPred = [&](Instruction &I) {
2889       ReturnInst &RI = cast<ReturnInst>(I);
2890       if (!isa<UndefValue>(RI.getReturnValue()))
2891         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2892       return true;
2893     };
2894     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2895     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2896   }
2897 
2898   /// See AbstractAttribute::trackStatistics()
2899   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2900 };
2901 
2902 struct AAIsDeadFunction : public AAIsDead {
2903   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2904 
2905   /// See AbstractAttribute::initialize(...).
2906   void initialize(Attributor &A) override {
2907     const Function *F = getAnchorScope();
2908     if (F && !F->isDeclaration()) {
2909       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2910       assumeLive(A, F->getEntryBlock());
2911     }
2912   }
2913 
2914   /// See AbstractAttribute::getAsStr().
2915   const std::string getAsStr() const override {
2916     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2917            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
2918            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2919            std::to_string(KnownDeadEnds.size()) + "]";
2920   }
2921 
2922   /// See AbstractAttribute::manifest(...).
2923   ChangeStatus manifest(Attributor &A) override {
2924     assert(getState().isValidState() &&
2925            "Attempted to manifest an invalid state!");
2926 
2927     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2928     Function &F = *getAnchorScope();
2929 
2930     if (AssumedLiveBlocks.empty()) {
2931       A.deleteAfterManifest(F);
2932       return ChangeStatus::CHANGED;
2933     }
2934 
2935     // Flag to determine if we can change an invoke to a call assuming the
2936     // callee is nounwind. This is not possible if the personality of the
2937     // function allows to catch asynchronous exceptions.
2938     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2939 
2940     KnownDeadEnds.set_union(ToBeExploredFrom);
2941     for (const Instruction *DeadEndI : KnownDeadEnds) {
2942       auto *CB = dyn_cast<CallBase>(DeadEndI);
2943       if (!CB)
2944         continue;
2945       const auto &NoReturnAA =
2946           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2947       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2948       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2949         continue;
2950 
2951       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
2952         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
2953       else
2954         A.changeToUnreachableAfterManifest(
2955             const_cast<Instruction *>(DeadEndI->getNextNode()));
2956       HasChanged = ChangeStatus::CHANGED;
2957     }
2958 
2959     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
2960     for (BasicBlock &BB : F)
2961       if (!AssumedLiveBlocks.count(&BB)) {
2962         A.deleteAfterManifest(BB);
2963         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
2964       }
2965 
2966     return HasChanged;
2967   }
2968 
2969   /// See AbstractAttribute::updateImpl(...).
2970   ChangeStatus updateImpl(Attributor &A) override;
2971 
2972   /// See AbstractAttribute::trackStatistics()
2973   void trackStatistics() const override {}
2974 
2975   /// Returns true if the function is assumed dead.
2976   bool isAssumedDead() const override { return false; }
2977 
2978   /// See AAIsDead::isKnownDead().
2979   bool isKnownDead() const override { return false; }
2980 
2981   /// See AAIsDead::isAssumedDead(BasicBlock *).
2982   bool isAssumedDead(const BasicBlock *BB) const override {
2983     assert(BB->getParent() == getAnchorScope() &&
2984            "BB must be in the same anchor scope function.");
2985 
2986     if (!getAssumed())
2987       return false;
2988     return !AssumedLiveBlocks.count(BB);
2989   }
2990 
2991   /// See AAIsDead::isKnownDead(BasicBlock *).
2992   bool isKnownDead(const BasicBlock *BB) const override {
2993     return getKnown() && isAssumedDead(BB);
2994   }
2995 
2996   /// See AAIsDead::isAssumed(Instruction *I).
2997   bool isAssumedDead(const Instruction *I) const override {
2998     assert(I->getParent()->getParent() == getAnchorScope() &&
2999            "Instruction must be in the same anchor scope function.");
3000 
3001     if (!getAssumed())
3002       return false;
3003 
3004     // If it is not in AssumedLiveBlocks then it for sure dead.
3005     // Otherwise, it can still be after noreturn call in a live block.
3006     if (!AssumedLiveBlocks.count(I->getParent()))
3007       return true;
3008 
3009     // If it is not after a liveness barrier it is live.
3010     const Instruction *PrevI = I->getPrevNode();
3011     while (PrevI) {
3012       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3013         return true;
3014       PrevI = PrevI->getPrevNode();
3015     }
3016     return false;
3017   }
3018 
3019   /// See AAIsDead::isKnownDead(Instruction *I).
3020   bool isKnownDead(const Instruction *I) const override {
3021     return getKnown() && isAssumedDead(I);
3022   }
3023 
3024   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3025   /// that internal function called from \p BB should now be looked at.
3026   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3027     if (!AssumedLiveBlocks.insert(&BB).second)
3028       return false;
3029 
3030     // We assume that all of BB is (probably) live now and if there are calls to
3031     // internal functions we will assume that those are now live as well. This
3032     // is a performance optimization for blocks with calls to a lot of internal
3033     // functions. It can however cause dead functions to be treated as live.
3034     for (const Instruction &I : BB)
3035       if (const auto *CB = dyn_cast<CallBase>(&I))
3036         if (const Function *F = CB->getCalledFunction())
3037           if (F->hasLocalLinkage())
3038             A.markLiveInternalFunction(*F);
3039     return true;
3040   }
3041 
3042   /// Collection of instructions that need to be explored again, e.g., we
3043   /// did assume they do not transfer control to (one of their) successors.
3044   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3045 
3046   /// Collection of instructions that are known to not transfer control.
3047   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3048 
3049   /// Collection of all assumed live BasicBlocks.
3050   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3051 };
3052 
3053 static bool
3054 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3055                         AbstractAttribute &AA,
3056                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3057   const IRPosition &IPos = IRPosition::callsite_function(CB);
3058 
3059   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
3060   if (NoReturnAA.isAssumedNoReturn())
3061     return !NoReturnAA.isKnownNoReturn();
3062   if (CB.isTerminator())
3063     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3064   else
3065     AliveSuccessors.push_back(CB.getNextNode());
3066   return false;
3067 }
3068 
3069 static bool
3070 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3071                         AbstractAttribute &AA,
3072                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3073   bool UsedAssumedInformation =
3074       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3075 
3076   // First, determine if we can change an invoke to a call assuming the
3077   // callee is nounwind. This is not possible if the personality of the
3078   // function allows to catch asynchronous exceptions.
3079   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3080     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3081   } else {
3082     const IRPosition &IPos = IRPosition::callsite_function(II);
3083     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
3084     if (AANoUnw.isAssumedNoUnwind()) {
3085       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3086     } else {
3087       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3088     }
3089   }
3090   return UsedAssumedInformation;
3091 }
3092 
3093 static bool
3094 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3095                         AbstractAttribute &AA,
3096                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3097   bool UsedAssumedInformation = false;
3098   if (BI.getNumSuccessors() == 1) {
3099     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3100   } else {
3101     Optional<ConstantInt *> CI = getAssumedConstantInt(
3102         A, *BI.getCondition(), AA, UsedAssumedInformation);
3103     if (!CI.hasValue()) {
3104       // No value yet, assume both edges are dead.
3105     } else if (CI.getValue()) {
3106       const BasicBlock *SuccBB =
3107           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3108       AliveSuccessors.push_back(&SuccBB->front());
3109     } else {
3110       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3111       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3112       UsedAssumedInformation = false;
3113     }
3114   }
3115   return UsedAssumedInformation;
3116 }
3117 
3118 static bool
3119 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3120                         AbstractAttribute &AA,
3121                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3122   bool UsedAssumedInformation = false;
3123   Optional<ConstantInt *> CI =
3124       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3125   if (!CI.hasValue()) {
3126     // No value yet, assume all edges are dead.
3127   } else if (CI.getValue()) {
3128     for (auto &CaseIt : SI.cases()) {
3129       if (CaseIt.getCaseValue() == CI.getValue()) {
3130         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3131         return UsedAssumedInformation;
3132       }
3133     }
3134     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3135     return UsedAssumedInformation;
3136   } else {
3137     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3138       AliveSuccessors.push_back(&SuccBB->front());
3139   }
3140   return UsedAssumedInformation;
3141 }
3142 
3143 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3144   ChangeStatus Change = ChangeStatus::UNCHANGED;
3145 
3146   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3147                     << getAnchorScope()->size() << "] BBs and "
3148                     << ToBeExploredFrom.size() << " exploration points and "
3149                     << KnownDeadEnds.size() << " known dead ends\n");
3150 
3151   // Copy and clear the list of instructions we need to explore from. It is
3152   // refilled with instructions the next update has to look at.
3153   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3154                                                ToBeExploredFrom.end());
3155   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3156 
3157   SmallVector<const Instruction *, 8> AliveSuccessors;
3158   while (!Worklist.empty()) {
3159     const Instruction *I = Worklist.pop_back_val();
3160     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3161 
3162     AliveSuccessors.clear();
3163 
3164     bool UsedAssumedInformation = false;
3165     switch (I->getOpcode()) {
3166     // TODO: look for (assumed) UB to backwards propagate "deadness".
3167     default:
3168       if (I->isTerminator()) {
3169         for (const BasicBlock *SuccBB : successors(I->getParent()))
3170           AliveSuccessors.push_back(&SuccBB->front());
3171       } else {
3172         AliveSuccessors.push_back(I->getNextNode());
3173       }
3174       break;
3175     case Instruction::Call:
3176       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3177                                                        *this, AliveSuccessors);
3178       break;
3179     case Instruction::Invoke:
3180       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3181                                                        *this, AliveSuccessors);
3182       break;
3183     case Instruction::Br:
3184       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3185                                                        *this, AliveSuccessors);
3186       break;
3187     case Instruction::Switch:
3188       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3189                                                        *this, AliveSuccessors);
3190       break;
3191     }
3192 
3193     if (UsedAssumedInformation) {
3194       NewToBeExploredFrom.insert(I);
3195     } else {
3196       Change = ChangeStatus::CHANGED;
3197       if (AliveSuccessors.empty() ||
3198           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3199         KnownDeadEnds.insert(I);
3200     }
3201 
3202     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3203                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3204                       << UsedAssumedInformation << "\n");
3205 
3206     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3207       if (!I->isTerminator()) {
3208         assert(AliveSuccessors.size() == 1 &&
3209                "Non-terminator expected to have a single successor!");
3210         Worklist.push_back(AliveSuccessor);
3211       } else {
3212         if (assumeLive(A, *AliveSuccessor->getParent()))
3213           Worklist.push_back(AliveSuccessor);
3214       }
3215     }
3216   }
3217 
3218   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3219 
3220   // If we know everything is live there is no need to query for liveness.
3221   // Instead, indicating a pessimistic fixpoint will cause the state to be
3222   // "invalid" and all queries to be answered conservatively without lookups.
3223   // To be in this state we have to (1) finished the exploration and (3) not
3224   // discovered any non-trivial dead end and (2) not ruled unreachable code
3225   // dead.
3226   if (ToBeExploredFrom.empty() &&
3227       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3228       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3229         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3230       }))
3231     return indicatePessimisticFixpoint();
3232   return Change;
3233 }
3234 
3235 /// Liveness information for a call sites.
3236 struct AAIsDeadCallSite final : AAIsDeadFunction {
3237   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3238       : AAIsDeadFunction(IRP, A) {}
3239 
3240   /// See AbstractAttribute::initialize(...).
3241   void initialize(Attributor &A) override {
3242     // TODO: Once we have call site specific value information we can provide
3243     //       call site specific liveness information and then it makes
3244     //       sense to specialize attributes for call sites instead of
3245     //       redirecting requests to the callee.
3246     llvm_unreachable("Abstract attributes for liveness are not "
3247                      "supported for call sites yet!");
3248   }
3249 
3250   /// See AbstractAttribute::updateImpl(...).
3251   ChangeStatus updateImpl(Attributor &A) override {
3252     return indicatePessimisticFixpoint();
3253   }
3254 
3255   /// See AbstractAttribute::trackStatistics()
3256   void trackStatistics() const override {}
3257 };
3258 
3259 /// -------------------- Dereferenceable Argument Attribute --------------------
3260 
3261 template <>
3262 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3263                                                      const DerefState &R) {
3264   ChangeStatus CS0 =
3265       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3266   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3267   return CS0 | CS1;
3268 }
3269 
3270 struct AADereferenceableImpl : AADereferenceable {
3271   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3272       : AADereferenceable(IRP, A) {}
3273   using StateType = DerefState;
3274 
3275   void initialize(Attributor &A) override {
3276     SmallVector<Attribute, 4> Attrs;
3277     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3278              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3279     for (const Attribute &Attr : Attrs)
3280       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3281 
3282     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
3283                                        /* TrackDependence */ false);
3284 
3285     const IRPosition &IRP = this->getIRPosition();
3286     bool IsFnInterface = IRP.isFnInterfaceKind();
3287     Function *FnScope = IRP.getAnchorScope();
3288     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope)))
3289       indicatePessimisticFixpoint();
3290   }
3291 
3292   /// See AbstractAttribute::getState()
3293   /// {
3294   StateType &getState() override { return *this; }
3295   const StateType &getState() const override { return *this; }
3296   /// }
3297 
3298   /// Helper function for collecting accessed bytes in must-be-executed-context
3299   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3300                               DerefState &State) {
3301     const Value *UseV = U->get();
3302     if (!UseV->getType()->isPointerTy())
3303       return;
3304 
3305     Type *PtrTy = UseV->getType();
3306     const DataLayout &DL = A.getDataLayout();
3307     int64_t Offset;
3308     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3309             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3310       if (Base == &getAssociatedValue() &&
3311           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3312         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3313         State.addAccessedBytes(Offset, Size);
3314       }
3315     }
3316     return;
3317   }
3318 
3319   /// See AAFromMustBeExecutedContext
3320   bool followUse(Attributor &A, const Use *U, const Instruction *I,
3321                  AADereferenceable::StateType &State) {
3322     bool IsNonNull = false;
3323     bool TrackUse = false;
3324     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3325         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3326 
3327     addAccessedBytesForUse(A, U, I, State);
3328     State.takeKnownDerefBytesMaximum(DerefBytes);
3329     return TrackUse;
3330   }
3331 
3332   /// See AbstractAttribute::manifest(...).
3333   ChangeStatus manifest(Attributor &A) override {
3334     ChangeStatus Change = AADereferenceable::manifest(A);
3335     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3336       removeAttrs({Attribute::DereferenceableOrNull});
3337       return ChangeStatus::CHANGED;
3338     }
3339     return Change;
3340   }
3341 
3342   void getDeducedAttributes(LLVMContext &Ctx,
3343                             SmallVectorImpl<Attribute> &Attrs) const override {
3344     // TODO: Add *_globally support
3345     if (isAssumedNonNull())
3346       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3347           Ctx, getAssumedDereferenceableBytes()));
3348     else
3349       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3350           Ctx, getAssumedDereferenceableBytes()));
3351   }
3352 
3353   /// See AbstractAttribute::getAsStr().
3354   const std::string getAsStr() const override {
3355     if (!getAssumedDereferenceableBytes())
3356       return "unknown-dereferenceable";
3357     return std::string("dereferenceable") +
3358            (isAssumedNonNull() ? "" : "_or_null") +
3359            (isAssumedGlobal() ? "_globally" : "") + "<" +
3360            std::to_string(getKnownDereferenceableBytes()) + "-" +
3361            std::to_string(getAssumedDereferenceableBytes()) + ">";
3362   }
3363 };
3364 
3365 /// Dereferenceable attribute for a floating value.
3366 struct AADereferenceableFloating
3367     : AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl> {
3368   using Base =
3369       AAFromMustBeExecutedContext<AADereferenceable, AADereferenceableImpl>;
3370   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3371       : Base(IRP, A) {}
3372 
3373   /// See AbstractAttribute::updateImpl(...).
3374   ChangeStatus updateImpl(Attributor &A) override {
3375     ChangeStatus Change = Base::updateImpl(A);
3376 
3377     const DataLayout &DL = A.getDataLayout();
3378 
3379     auto VisitValueCB = [&](Value &V, const Instruction *, DerefState &T,
3380                             bool Stripped) -> bool {
3381       unsigned IdxWidth =
3382           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3383       APInt Offset(IdxWidth, 0);
3384       const Value *Base =
3385           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3386 
3387       const auto &AA =
3388           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3389       int64_t DerefBytes = 0;
3390       if (!Stripped && this == &AA) {
3391         // Use IR information if we did not strip anything.
3392         // TODO: track globally.
3393         bool CanBeNull;
3394         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3395         T.GlobalState.indicatePessimisticFixpoint();
3396       } else {
3397         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3398         DerefBytes = DS.DerefBytesState.getAssumed();
3399         T.GlobalState &= DS.GlobalState;
3400       }
3401 
3402       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3403 
3404       // For now we do not try to "increase" dereferenceability due to negative
3405       // indices as we first have to come up with code to deal with loops and
3406       // for overflows of the dereferenceable bytes.
3407       int64_t OffsetSExt = Offset.getSExtValue();
3408       if (OffsetSExt < 0)
3409         OffsetSExt = 0;
3410 
3411       T.takeAssumedDerefBytesMinimum(
3412           std::max(int64_t(0), DerefBytes - OffsetSExt));
3413 
3414       if (this == &AA) {
3415         if (!Stripped) {
3416           // If nothing was stripped IR information is all we got.
3417           T.takeKnownDerefBytesMaximum(
3418               std::max(int64_t(0), DerefBytes - OffsetSExt));
3419           T.indicatePessimisticFixpoint();
3420         } else if (OffsetSExt > 0) {
3421           // If something was stripped but there is circular reasoning we look
3422           // for the offset. If it is positive we basically decrease the
3423           // dereferenceable bytes in a circluar loop now, which will simply
3424           // drive them down to the known value in a very slow way which we
3425           // can accelerate.
3426           T.indicatePessimisticFixpoint();
3427         }
3428       }
3429 
3430       return T.isValidState();
3431     };
3432 
3433     DerefState T;
3434     if (!genericValueTraversal<AADereferenceable, DerefState>(
3435             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3436       return indicatePessimisticFixpoint();
3437 
3438     return Change | clampStateAndIndicateChange(getState(), T);
3439   }
3440 
3441   /// See AbstractAttribute::trackStatistics()
3442   void trackStatistics() const override {
3443     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3444   }
3445 };
3446 
3447 /// Dereferenceable attribute for a return value.
3448 struct AADereferenceableReturned final
3449     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3450   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3451       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3452             IRP, A) {}
3453 
3454   /// See AbstractAttribute::trackStatistics()
3455   void trackStatistics() const override {
3456     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3457   }
3458 };
3459 
3460 /// Dereferenceable attribute for an argument
3461 struct AADereferenceableArgument final
3462     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3463           AADereferenceable, AADereferenceableImpl> {
3464   using Base = AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<
3465       AADereferenceable, AADereferenceableImpl>;
3466   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3467       : Base(IRP, A) {}
3468 
3469   /// See AbstractAttribute::trackStatistics()
3470   void trackStatistics() const override {
3471     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3472   }
3473 };
3474 
3475 /// Dereferenceable attribute for a call site argument.
3476 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3477   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3478       : AADereferenceableFloating(IRP, A) {}
3479 
3480   /// See AbstractAttribute::trackStatistics()
3481   void trackStatistics() const override {
3482     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3483   }
3484 };
3485 
3486 /// Dereferenceable attribute deduction for a call site return value.
3487 struct AADereferenceableCallSiteReturned final
3488     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3489           AADereferenceable, AADereferenceableImpl> {
3490   using Base = AACallSiteReturnedFromReturnedAndMustBeExecutedContext<
3491       AADereferenceable, AADereferenceableImpl>;
3492   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3493       : Base(IRP, A) {}
3494 
3495   /// See AbstractAttribute::trackStatistics()
3496   void trackStatistics() const override {
3497     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3498   }
3499 };
3500 
3501 // ------------------------ Align Argument Attribute ------------------------
3502 
3503 /// \p Ptr is accessed so we can get alignment information if the ABI requires
3504 /// the element type to be aligned.
3505 static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr,
3506                                                    const DataLayout &DL) {
3507   MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL);
3508   Type *ElementTy = Ptr->getType()->getPointerElementType();
3509   if (ElementTy->isSized())
3510     KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy));
3511   return KnownAlignment;
3512 }
3513 
3514 static unsigned getKnownAlignForUse(Attributor &A,
3515                                     AbstractAttribute &QueryingAA,
3516                                     Value &AssociatedValue, const Use *U,
3517                                     const Instruction *I, bool &TrackUse) {
3518   // We need to follow common pointer manipulation uses to the accesses they
3519   // feed into.
3520   if (isa<CastInst>(I)) {
3521     // Follow all but ptr2int casts.
3522     TrackUse = !isa<PtrToIntInst>(I);
3523     return 0;
3524   }
3525   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3526     if (GEP->hasAllConstantIndices()) {
3527       TrackUse = true;
3528       return 0;
3529     }
3530   }
3531 
3532   MaybeAlign MA;
3533   if (const auto *CB = dyn_cast<CallBase>(I)) {
3534     if (CB->isBundleOperand(U) || CB->isCallee(U))
3535       return 0;
3536 
3537     unsigned ArgNo = CB->getArgOperandNo(U);
3538     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3539     // As long as we only use known information there is no need to track
3540     // dependences here.
3541     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3542                                         /* TrackDependence */ false);
3543     MA = MaybeAlign(AlignAA.getKnownAlign());
3544   }
3545 
3546   const DataLayout &DL = A.getDataLayout();
3547   const Value *UseV = U->get();
3548   if (auto *SI = dyn_cast<StoreInst>(I)) {
3549     if (SI->getPointerOperand() == UseV) {
3550       if (unsigned SIAlign = SI->getAlignment())
3551         MA = MaybeAlign(SIAlign);
3552       else
3553         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3554     }
3555   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3556     if (LI->getPointerOperand() == UseV) {
3557       if (unsigned LIAlign = LI->getAlignment())
3558         MA = MaybeAlign(LIAlign);
3559       else
3560         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3561     }
3562   }
3563 
3564   if (!MA.hasValue() || MA <= 1)
3565     return 0;
3566 
3567   unsigned Alignment = MA->value();
3568   int64_t Offset;
3569 
3570   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3571     if (Base == &AssociatedValue) {
3572       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3573       // So we can say that the maximum power of two which is a divisor of
3574       // gcd(Offset, Alignment) is an alignment.
3575 
3576       uint32_t gcd =
3577           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3578       Alignment = llvm::PowerOf2Floor(gcd);
3579     }
3580   }
3581 
3582   return Alignment;
3583 }
3584 
3585 struct AAAlignImpl : AAAlign {
3586   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3587 
3588   /// See AbstractAttribute::initialize(...).
3589   void initialize(Attributor &A) override {
3590     SmallVector<Attribute, 4> Attrs;
3591     getAttrs({Attribute::Alignment}, Attrs);
3592     for (const Attribute &Attr : Attrs)
3593       takeKnownMaximum(Attr.getValueAsInt());
3594 
3595     if (getIRPosition().isFnInterfaceKind() &&
3596         (!getAnchorScope() ||
3597          !A.isFunctionIPOAmendable(*getAssociatedFunction())))
3598       indicatePessimisticFixpoint();
3599   }
3600 
3601   /// See AbstractAttribute::manifest(...).
3602   ChangeStatus manifest(Attributor &A) override {
3603     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3604 
3605     // Check for users that allow alignment annotations.
3606     Value &AssociatedValue = getAssociatedValue();
3607     for (const Use &U : AssociatedValue.uses()) {
3608       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3609         if (SI->getPointerOperand() == &AssociatedValue)
3610           if (SI->getAlignment() < getAssumedAlign()) {
3611             STATS_DECLTRACK(AAAlign, Store,
3612                             "Number of times alignment added to a store");
3613             SI->setAlignment(Align(getAssumedAlign()));
3614             LoadStoreChanged = ChangeStatus::CHANGED;
3615           }
3616       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3617         if (LI->getPointerOperand() == &AssociatedValue)
3618           if (LI->getAlignment() < getAssumedAlign()) {
3619             LI->setAlignment(Align(getAssumedAlign()));
3620             STATS_DECLTRACK(AAAlign, Load,
3621                             "Number of times alignment added to a load");
3622             LoadStoreChanged = ChangeStatus::CHANGED;
3623           }
3624       }
3625     }
3626 
3627     ChangeStatus Changed = AAAlign::manifest(A);
3628 
3629     MaybeAlign InheritAlign =
3630         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3631     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3632       return LoadStoreChanged;
3633     return Changed | LoadStoreChanged;
3634   }
3635 
3636   // TODO: Provide a helper to determine the implied ABI alignment and check in
3637   //       the existing manifest method and a new one for AAAlignImpl that value
3638   //       to avoid making the alignment explicit if it did not improve.
3639 
3640   /// See AbstractAttribute::getDeducedAttributes
3641   virtual void
3642   getDeducedAttributes(LLVMContext &Ctx,
3643                        SmallVectorImpl<Attribute> &Attrs) const override {
3644     if (getAssumedAlign() > 1)
3645       Attrs.emplace_back(
3646           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3647   }
3648   /// See AAFromMustBeExecutedContext
3649   bool followUse(Attributor &A, const Use *U, const Instruction *I,
3650                  AAAlign::StateType &State) {
3651     bool TrackUse = false;
3652 
3653     unsigned int KnownAlign =
3654         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3655     State.takeKnownMaximum(KnownAlign);
3656 
3657     return TrackUse;
3658   }
3659 
3660   /// See AbstractAttribute::getAsStr().
3661   const std::string getAsStr() const override {
3662     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3663                                 "-" + std::to_string(getAssumedAlign()) + ">")
3664                              : "unknown-align";
3665   }
3666 };
3667 
3668 /// Align attribute for a floating value.
3669 struct AAAlignFloating : AAFromMustBeExecutedContext<AAAlign, AAAlignImpl> {
3670   using Base = AAFromMustBeExecutedContext<AAAlign, AAAlignImpl>;
3671   AAAlignFloating(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3672 
3673   /// See AbstractAttribute::updateImpl(...).
3674   ChangeStatus updateImpl(Attributor &A) override {
3675     Base::updateImpl(A);
3676 
3677     const DataLayout &DL = A.getDataLayout();
3678 
3679     auto VisitValueCB = [&](Value &V, const Instruction *,
3680                             AAAlign::StateType &T, bool Stripped) -> bool {
3681       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3682       if (!Stripped && this == &AA) {
3683         // Use only IR information if we did not strip anything.
3684         const MaybeAlign PA = V.getPointerAlignment(DL);
3685         T.takeKnownMaximum(PA ? PA->value() : 0);
3686         T.indicatePessimisticFixpoint();
3687       } else {
3688         // Use abstract attribute information.
3689         const AAAlign::StateType &DS =
3690             static_cast<const AAAlign::StateType &>(AA.getState());
3691         T ^= DS;
3692       }
3693       return T.isValidState();
3694     };
3695 
3696     StateType T;
3697     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3698                                                    VisitValueCB, getCtxI()))
3699       return indicatePessimisticFixpoint();
3700 
3701     // TODO: If we know we visited all incoming values, thus no are assumed
3702     // dead, we can take the known information from the state T.
3703     return clampStateAndIndicateChange(getState(), T);
3704   }
3705 
3706   /// See AbstractAttribute::trackStatistics()
3707   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3708 };
3709 
3710 /// Align attribute for function return value.
3711 struct AAAlignReturned final
3712     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3713   AAAlignReturned(const IRPosition &IRP, Attributor &A)
3714       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {}
3715 
3716   /// See AbstractAttribute::trackStatistics()
3717   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3718 };
3719 
3720 /// Align attribute for function argument.
3721 struct AAAlignArgument final
3722     : AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3723                                                               AAAlignImpl> {
3724   using Base =
3725       AAArgumentFromCallSiteArgumentsAndMustBeExecutedContext<AAAlign,
3726                                                               AAAlignImpl>;
3727   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3728 
3729   /// See AbstractAttribute::manifest(...).
3730   ChangeStatus manifest(Attributor &A) override {
3731     // If the associated argument is involved in a must-tail call we give up
3732     // because we would need to keep the argument alignments of caller and
3733     // callee in-sync. Just does not seem worth the trouble right now.
3734     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3735       return ChangeStatus::UNCHANGED;
3736     return Base::manifest(A);
3737   }
3738 
3739   /// See AbstractAttribute::trackStatistics()
3740   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3741 };
3742 
3743 struct AAAlignCallSiteArgument final : AAAlignFloating {
3744   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3745       : AAAlignFloating(IRP, A) {}
3746 
3747   /// See AbstractAttribute::manifest(...).
3748   ChangeStatus manifest(Attributor &A) override {
3749     // If the associated argument is involved in a must-tail call we give up
3750     // because we would need to keep the argument alignments of caller and
3751     // callee in-sync. Just does not seem worth the trouble right now.
3752     if (Argument *Arg = getAssociatedArgument())
3753       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3754         return ChangeStatus::UNCHANGED;
3755     ChangeStatus Changed = AAAlignImpl::manifest(A);
3756     MaybeAlign InheritAlign =
3757         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3758     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3759       Changed = ChangeStatus::UNCHANGED;
3760     return Changed;
3761   }
3762 
3763   /// See AbstractAttribute::updateImpl(Attributor &A).
3764   ChangeStatus updateImpl(Attributor &A) override {
3765     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3766     if (Argument *Arg = getAssociatedArgument()) {
3767       // We only take known information from the argument
3768       // so we do not need to track a dependence.
3769       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3770           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3771       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3772     }
3773     return Changed;
3774   }
3775 
3776   /// See AbstractAttribute::trackStatistics()
3777   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3778 };
3779 
3780 /// Align attribute deduction for a call site return value.
3781 struct AAAlignCallSiteReturned final
3782     : AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3783                                                              AAAlignImpl> {
3784   using Base =
3785       AACallSiteReturnedFromReturnedAndMustBeExecutedContext<AAAlign,
3786                                                              AAAlignImpl>;
3787   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3788       : Base(IRP, A) {}
3789 
3790   /// See AbstractAttribute::initialize(...).
3791   void initialize(Attributor &A) override {
3792     Base::initialize(A);
3793     Function *F = getAssociatedFunction();
3794     if (!F)
3795       indicatePessimisticFixpoint();
3796   }
3797 
3798   /// See AbstractAttribute::trackStatistics()
3799   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3800 };
3801 
3802 /// ------------------ Function No-Return Attribute ----------------------------
3803 struct AANoReturnImpl : public AANoReturn {
3804   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3805 
3806   /// See AbstractAttribute::initialize(...).
3807   void initialize(Attributor &A) override {
3808     AANoReturn::initialize(A);
3809     Function *F = getAssociatedFunction();
3810     if (!F)
3811       indicatePessimisticFixpoint();
3812   }
3813 
3814   /// See AbstractAttribute::getAsStr().
3815   const std::string getAsStr() const override {
3816     return getAssumed() ? "noreturn" : "may-return";
3817   }
3818 
3819   /// See AbstractAttribute::updateImpl(Attributor &A).
3820   virtual ChangeStatus updateImpl(Attributor &A) override {
3821     auto CheckForNoReturn = [](Instruction &) { return false; };
3822     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3823                                    {(unsigned)Instruction::Ret}))
3824       return indicatePessimisticFixpoint();
3825     return ChangeStatus::UNCHANGED;
3826   }
3827 };
3828 
3829 struct AANoReturnFunction final : AANoReturnImpl {
3830   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
3831       : AANoReturnImpl(IRP, A) {}
3832 
3833   /// See AbstractAttribute::trackStatistics()
3834   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3835 };
3836 
3837 /// NoReturn attribute deduction for a call sites.
3838 struct AANoReturnCallSite final : AANoReturnImpl {
3839   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
3840       : AANoReturnImpl(IRP, A) {}
3841 
3842   /// See AbstractAttribute::updateImpl(...).
3843   ChangeStatus updateImpl(Attributor &A) override {
3844     // TODO: Once we have call site specific value information we can provide
3845     //       call site specific liveness information and then it makes
3846     //       sense to specialize attributes for call sites arguments instead of
3847     //       redirecting requests to the callee argument.
3848     Function *F = getAssociatedFunction();
3849     const IRPosition &FnPos = IRPosition::function(*F);
3850     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3851     return clampStateAndIndicateChange(
3852         getState(),
3853         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3854   }
3855 
3856   /// See AbstractAttribute::trackStatistics()
3857   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3858 };
3859 
3860 /// ----------------------- Variable Capturing ---------------------------------
3861 
3862 /// A class to hold the state of for no-capture attributes.
3863 struct AANoCaptureImpl : public AANoCapture {
3864   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
3865 
3866   /// See AbstractAttribute::initialize(...).
3867   void initialize(Attributor &A) override {
3868     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3869       indicateOptimisticFixpoint();
3870       return;
3871     }
3872     Function *AnchorScope = getAnchorScope();
3873     if (isFnInterfaceKind() &&
3874         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3875       indicatePessimisticFixpoint();
3876       return;
3877     }
3878 
3879     // You cannot "capture" null in the default address space.
3880     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3881         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3882       indicateOptimisticFixpoint();
3883       return;
3884     }
3885 
3886     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3887 
3888     // Check what state the associated function can actually capture.
3889     if (F)
3890       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3891     else
3892       indicatePessimisticFixpoint();
3893   }
3894 
3895   /// See AbstractAttribute::updateImpl(...).
3896   ChangeStatus updateImpl(Attributor &A) override;
3897 
3898   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3899   virtual void
3900   getDeducedAttributes(LLVMContext &Ctx,
3901                        SmallVectorImpl<Attribute> &Attrs) const override {
3902     if (!isAssumedNoCaptureMaybeReturned())
3903       return;
3904 
3905     if (getArgNo() >= 0) {
3906       if (isAssumedNoCapture())
3907         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3908       else if (ManifestInternal)
3909         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3910     }
3911   }
3912 
3913   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3914   /// depending on the ability of the function associated with \p IRP to capture
3915   /// state in memory and through "returning/throwing", respectively.
3916   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3917                                                    const Function &F,
3918                                                    BitIntegerState &State) {
3919     // TODO: Once we have memory behavior attributes we should use them here.
3920 
3921     // If we know we cannot communicate or write to memory, we do not care about
3922     // ptr2int anymore.
3923     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3924         F.getReturnType()->isVoidTy()) {
3925       State.addKnownBits(NO_CAPTURE);
3926       return;
3927     }
3928 
3929     // A function cannot capture state in memory if it only reads memory, it can
3930     // however return/throw state and the state might be influenced by the
3931     // pointer value, e.g., loading from a returned pointer might reveal a bit.
3932     if (F.onlyReadsMemory())
3933       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3934 
3935     // A function cannot communicate state back if it does not through
3936     // exceptions and doesn not return values.
3937     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3938       State.addKnownBits(NOT_CAPTURED_IN_RET);
3939 
3940     // Check existing "returned" attributes.
3941     int ArgNo = IRP.getArgNo();
3942     if (F.doesNotThrow() && ArgNo >= 0) {
3943       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3944         if (F.hasParamAttribute(u, Attribute::Returned)) {
3945           if (u == unsigned(ArgNo))
3946             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3947           else if (F.onlyReadsMemory())
3948             State.addKnownBits(NO_CAPTURE);
3949           else
3950             State.addKnownBits(NOT_CAPTURED_IN_RET);
3951           break;
3952         }
3953     }
3954   }
3955 
3956   /// See AbstractState::getAsStr().
3957   const std::string getAsStr() const override {
3958     if (isKnownNoCapture())
3959       return "known not-captured";
3960     if (isAssumedNoCapture())
3961       return "assumed not-captured";
3962     if (isKnownNoCaptureMaybeReturned())
3963       return "known not-captured-maybe-returned";
3964     if (isAssumedNoCaptureMaybeReturned())
3965       return "assumed not-captured-maybe-returned";
3966     return "assumed-captured";
3967   }
3968 };
3969 
3970 /// Attributor-aware capture tracker.
3971 struct AACaptureUseTracker final : public CaptureTracker {
3972 
3973   /// Create a capture tracker that can lookup in-flight abstract attributes
3974   /// through the Attributor \p A.
3975   ///
3976   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3977   /// search is stopped. If a use leads to a return instruction,
3978   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3979   /// If a use leads to a ptr2int which may capture the value,
3980   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3981   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3982   /// set. All values in \p PotentialCopies are later tracked as well. For every
3983   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3984   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3985   /// conservatively set to true.
3986   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3987                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3988                       SmallVectorImpl<const Value *> &PotentialCopies,
3989                       unsigned &RemainingUsesToExplore)
3990       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3991         PotentialCopies(PotentialCopies),
3992         RemainingUsesToExplore(RemainingUsesToExplore) {}
3993 
3994   /// Determine if \p V maybe captured. *Also updates the state!*
3995   bool valueMayBeCaptured(const Value *V) {
3996     if (V->getType()->isPointerTy()) {
3997       PointerMayBeCaptured(V, this);
3998     } else {
3999       State.indicatePessimisticFixpoint();
4000     }
4001     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4002   }
4003 
4004   /// See CaptureTracker::tooManyUses().
4005   void tooManyUses() override {
4006     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4007   }
4008 
4009   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4010     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4011       return true;
4012     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4013         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
4014         DepClassTy::OPTIONAL);
4015     return DerefAA.getAssumedDereferenceableBytes();
4016   }
4017 
4018   /// See CaptureTracker::captured(...).
4019   bool captured(const Use *U) override {
4020     Instruction *UInst = cast<Instruction>(U->getUser());
4021     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4022                       << "\n");
4023 
4024     // Because we may reuse the tracker multiple times we keep track of the
4025     // number of explored uses ourselves as well.
4026     if (RemainingUsesToExplore-- == 0) {
4027       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4028       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4029                           /* Return */ true);
4030     }
4031 
4032     // Deal with ptr2int by following uses.
4033     if (isa<PtrToIntInst>(UInst)) {
4034       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4035       return valueMayBeCaptured(UInst);
4036     }
4037 
4038     // Explicitly catch return instructions.
4039     if (isa<ReturnInst>(UInst))
4040       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4041                           /* Return */ true);
4042 
4043     // For now we only use special logic for call sites. However, the tracker
4044     // itself knows about a lot of other non-capturing cases already.
4045     auto *CB = dyn_cast<CallBase>(UInst);
4046     if (!CB || !CB->isArgOperand(U))
4047       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4048                           /* Return */ true);
4049 
4050     unsigned ArgNo = CB->getArgOperandNo(U);
4051     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4052     // If we have a abstract no-capture attribute for the argument we can use
4053     // it to justify a non-capture attribute here. This allows recursion!
4054     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4055     if (ArgNoCaptureAA.isAssumedNoCapture())
4056       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4057                           /* Return */ false);
4058     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4059       addPotentialCopy(*CB);
4060       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4061                           /* Return */ false);
4062     }
4063 
4064     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4065     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4066                         /* Return */ true);
4067   }
4068 
4069   /// Register \p CS as potential copy of the value we are checking.
4070   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4071 
4072   /// See CaptureTracker::shouldExplore(...).
4073   bool shouldExplore(const Use *U) override {
4074     // Check liveness and ignore droppable users.
4075     return !U->getUser()->isDroppable() &&
4076            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4077   }
4078 
4079   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4080   /// \p CapturedInRet, then return the appropriate value for use in the
4081   /// CaptureTracker::captured() interface.
4082   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4083                     bool CapturedInRet) {
4084     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4085                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4086     if (CapturedInMem)
4087       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4088     if (CapturedInInt)
4089       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4090     if (CapturedInRet)
4091       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4092     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4093   }
4094 
4095 private:
4096   /// The attributor providing in-flight abstract attributes.
4097   Attributor &A;
4098 
4099   /// The abstract attribute currently updated.
4100   AANoCapture &NoCaptureAA;
4101 
4102   /// The abstract liveness state.
4103   const AAIsDead &IsDeadAA;
4104 
4105   /// The state currently updated.
4106   AANoCapture::StateType &State;
4107 
4108   /// Set of potential copies of the tracked value.
4109   SmallVectorImpl<const Value *> &PotentialCopies;
4110 
4111   /// Global counter to limit the number of explored uses.
4112   unsigned &RemainingUsesToExplore;
4113 };
4114 
4115 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4116   const IRPosition &IRP = getIRPosition();
4117   const Value *V =
4118       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4119   if (!V)
4120     return indicatePessimisticFixpoint();
4121 
4122   const Function *F =
4123       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4124   assert(F && "Expected a function!");
4125   const IRPosition &FnPos = IRPosition::function(*F);
4126   const auto &IsDeadAA =
4127       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4128 
4129   AANoCapture::StateType T;
4130 
4131   // Readonly means we cannot capture through memory.
4132   const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
4133       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4134   if (FnMemAA.isAssumedReadOnly()) {
4135     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4136     if (FnMemAA.isKnownReadOnly())
4137       addKnownBits(NOT_CAPTURED_IN_MEM);
4138   }
4139 
4140   // Make sure all returned values are different than the underlying value.
4141   // TODO: we could do this in a more sophisticated way inside
4142   //       AAReturnedValues, e.g., track all values that escape through returns
4143   //       directly somehow.
4144   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4145     bool SeenConstant = false;
4146     for (auto &It : RVAA.returned_values()) {
4147       if (isa<Constant>(It.first)) {
4148         if (SeenConstant)
4149           return false;
4150         SeenConstant = true;
4151       } else if (!isa<Argument>(It.first) ||
4152                  It.first == getAssociatedArgument())
4153         return false;
4154     }
4155     return true;
4156   };
4157 
4158   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4159       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4160   if (NoUnwindAA.isAssumedNoUnwind()) {
4161     bool IsVoidTy = F->getReturnType()->isVoidTy();
4162     const AAReturnedValues *RVAA =
4163         IsVoidTy ? nullptr
4164                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4165                                                  /* TrackDependence */ true,
4166                                                  DepClassTy::OPTIONAL);
4167     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4168       T.addKnownBits(NOT_CAPTURED_IN_RET);
4169       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4170         return ChangeStatus::UNCHANGED;
4171       if (NoUnwindAA.isKnownNoUnwind() &&
4172           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4173         addKnownBits(NOT_CAPTURED_IN_RET);
4174         if (isKnown(NOT_CAPTURED_IN_MEM))
4175           return indicateOptimisticFixpoint();
4176       }
4177     }
4178   }
4179 
4180   // Use the CaptureTracker interface and logic with the specialized tracker,
4181   // defined in AACaptureUseTracker, that can look at in-flight abstract
4182   // attributes and directly updates the assumed state.
4183   SmallVector<const Value *, 4> PotentialCopies;
4184   unsigned RemainingUsesToExplore =
4185       getDefaultMaxUsesToExploreForCaptureTracking();
4186   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4187                               RemainingUsesToExplore);
4188 
4189   // Check all potential copies of the associated value until we can assume
4190   // none will be captured or we have to assume at least one might be.
4191   unsigned Idx = 0;
4192   PotentialCopies.push_back(V);
4193   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4194     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4195 
4196   AANoCapture::StateType &S = getState();
4197   auto Assumed = S.getAssumed();
4198   S.intersectAssumedBits(T.getAssumed());
4199   if (!isAssumedNoCaptureMaybeReturned())
4200     return indicatePessimisticFixpoint();
4201   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4202                                    : ChangeStatus::CHANGED;
4203 }
4204 
4205 /// NoCapture attribute for function arguments.
4206 struct AANoCaptureArgument final : AANoCaptureImpl {
4207   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4208       : AANoCaptureImpl(IRP, A) {}
4209 
4210   /// See AbstractAttribute::trackStatistics()
4211   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4212 };
4213 
4214 /// NoCapture attribute for call site arguments.
4215 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4216   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4217       : AANoCaptureImpl(IRP, A) {}
4218 
4219   /// See AbstractAttribute::initialize(...).
4220   void initialize(Attributor &A) override {
4221     if (Argument *Arg = getAssociatedArgument())
4222       if (Arg->hasByValAttr())
4223         indicateOptimisticFixpoint();
4224     AANoCaptureImpl::initialize(A);
4225   }
4226 
4227   /// See AbstractAttribute::updateImpl(...).
4228   ChangeStatus updateImpl(Attributor &A) override {
4229     // TODO: Once we have call site specific value information we can provide
4230     //       call site specific liveness information and then it makes
4231     //       sense to specialize attributes for call sites arguments instead of
4232     //       redirecting requests to the callee argument.
4233     Argument *Arg = getAssociatedArgument();
4234     if (!Arg)
4235       return indicatePessimisticFixpoint();
4236     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4237     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4238     return clampStateAndIndicateChange(
4239         getState(),
4240         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4241   }
4242 
4243   /// See AbstractAttribute::trackStatistics()
4244   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4245 };
4246 
4247 /// NoCapture attribute for floating values.
4248 struct AANoCaptureFloating final : AANoCaptureImpl {
4249   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4250       : AANoCaptureImpl(IRP, A) {}
4251 
4252   /// See AbstractAttribute::trackStatistics()
4253   void trackStatistics() const override {
4254     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4255   }
4256 };
4257 
4258 /// NoCapture attribute for function return value.
4259 struct AANoCaptureReturned final : AANoCaptureImpl {
4260   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4261       : AANoCaptureImpl(IRP, A) {
4262     llvm_unreachable("NoCapture is not applicable to function returns!");
4263   }
4264 
4265   /// See AbstractAttribute::initialize(...).
4266   void initialize(Attributor &A) override {
4267     llvm_unreachable("NoCapture is not applicable to function returns!");
4268   }
4269 
4270   /// See AbstractAttribute::updateImpl(...).
4271   ChangeStatus updateImpl(Attributor &A) override {
4272     llvm_unreachable("NoCapture is not applicable to function returns!");
4273   }
4274 
4275   /// See AbstractAttribute::trackStatistics()
4276   void trackStatistics() const override {}
4277 };
4278 
4279 /// NoCapture attribute deduction for a call site return value.
4280 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4281   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4282       : AANoCaptureImpl(IRP, A) {}
4283 
4284   /// See AbstractAttribute::trackStatistics()
4285   void trackStatistics() const override {
4286     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4287   }
4288 };
4289 
4290 /// ------------------ Value Simplify Attribute ----------------------------
4291 struct AAValueSimplifyImpl : AAValueSimplify {
4292   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4293       : AAValueSimplify(IRP, A) {}
4294 
4295   /// See AbstractAttribute::initialize(...).
4296   void initialize(Attributor &A) override {
4297     if (getAssociatedValue().getType()->isVoidTy())
4298       indicatePessimisticFixpoint();
4299   }
4300 
4301   /// See AbstractAttribute::getAsStr().
4302   const std::string getAsStr() const override {
4303     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4304                         : "not-simple";
4305   }
4306 
4307   /// See AbstractAttribute::trackStatistics()
4308   void trackStatistics() const override {}
4309 
4310   /// See AAValueSimplify::getAssumedSimplifiedValue()
4311   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4312     if (!getAssumed())
4313       return const_cast<Value *>(&getAssociatedValue());
4314     return SimplifiedAssociatedValue;
4315   }
4316 
4317   /// Helper function for querying AAValueSimplify and updating candicate.
4318   /// \param QueryingValue Value trying to unify with SimplifiedValue
4319   /// \param AccumulatedSimplifiedValue Current simplification result.
4320   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4321                              Value &QueryingValue,
4322                              Optional<Value *> &AccumulatedSimplifiedValue) {
4323     // FIXME: Add a typecast support.
4324 
4325     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4326         QueryingAA, IRPosition::value(QueryingValue));
4327 
4328     Optional<Value *> QueryingValueSimplified =
4329         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4330 
4331     if (!QueryingValueSimplified.hasValue())
4332       return true;
4333 
4334     if (!QueryingValueSimplified.getValue())
4335       return false;
4336 
4337     Value &QueryingValueSimplifiedUnwrapped =
4338         *QueryingValueSimplified.getValue();
4339 
4340     if (AccumulatedSimplifiedValue.hasValue() &&
4341         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4342         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4343       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4344     if (AccumulatedSimplifiedValue.hasValue() &&
4345         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4346       return true;
4347 
4348     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4349                       << " is assumed to be "
4350                       << QueryingValueSimplifiedUnwrapped << "\n");
4351 
4352     AccumulatedSimplifiedValue = QueryingValueSimplified;
4353     return true;
4354   }
4355 
4356   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4357     if (!getAssociatedValue().getType()->isIntegerTy())
4358       return false;
4359 
4360     const auto &ValueConstantRangeAA =
4361         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4362 
4363     Optional<ConstantInt *> COpt =
4364         ValueConstantRangeAA.getAssumedConstantInt(A);
4365     if (COpt.hasValue()) {
4366       if (auto *C = COpt.getValue())
4367         SimplifiedAssociatedValue = C;
4368       else
4369         return false;
4370     } else {
4371       SimplifiedAssociatedValue = llvm::None;
4372     }
4373     return true;
4374   }
4375 
4376   /// See AbstractAttribute::manifest(...).
4377   ChangeStatus manifest(Attributor &A) override {
4378     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4379 
4380     if (SimplifiedAssociatedValue.hasValue() &&
4381         !SimplifiedAssociatedValue.getValue())
4382       return Changed;
4383 
4384     Value &V = getAssociatedValue();
4385     auto *C = SimplifiedAssociatedValue.hasValue()
4386                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4387                   : UndefValue::get(V.getType());
4388     if (C) {
4389       // We can replace the AssociatedValue with the constant.
4390       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4391         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4392                           << " :: " << *this << "\n");
4393         if (A.changeValueAfterManifest(V, *C))
4394           Changed = ChangeStatus::CHANGED;
4395       }
4396     }
4397 
4398     return Changed | AAValueSimplify::manifest(A);
4399   }
4400 
4401   /// See AbstractState::indicatePessimisticFixpoint(...).
4402   ChangeStatus indicatePessimisticFixpoint() override {
4403     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4404     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4405     SimplifiedAssociatedValue = &getAssociatedValue();
4406     indicateOptimisticFixpoint();
4407     return ChangeStatus::CHANGED;
4408   }
4409 
4410 protected:
4411   // An assumed simplified value. Initially, it is set to Optional::None, which
4412   // means that the value is not clear under current assumption. If in the
4413   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4414   // returns orignal associated value.
4415   Optional<Value *> SimplifiedAssociatedValue;
4416 };
4417 
4418 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4419   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4420       : AAValueSimplifyImpl(IRP, A) {}
4421 
4422   void initialize(Attributor &A) override {
4423     AAValueSimplifyImpl::initialize(A);
4424     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4425       indicatePessimisticFixpoint();
4426     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4427                 /* IgnoreSubsumingPositions */ true))
4428       indicatePessimisticFixpoint();
4429 
4430     // FIXME: This is a hack to prevent us from propagating function poiner in
4431     // the new pass manager CGSCC pass as it creates call edges the
4432     // CallGraphUpdater cannot handle yet.
4433     Value &V = getAssociatedValue();
4434     if (V.getType()->isPointerTy() &&
4435         V.getType()->getPointerElementType()->isFunctionTy() &&
4436         !A.isModulePass())
4437       indicatePessimisticFixpoint();
4438   }
4439 
4440   /// See AbstractAttribute::updateImpl(...).
4441   ChangeStatus updateImpl(Attributor &A) override {
4442     // Byval is only replacable if it is readonly otherwise we would write into
4443     // the replaced value and not the copy that byval creates implicitly.
4444     Argument *Arg = getAssociatedArgument();
4445     if (Arg->hasByValAttr()) {
4446       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4447       //       there is no race by not copying a constant byval.
4448       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4449       if (!MemAA.isAssumedReadOnly())
4450         return indicatePessimisticFixpoint();
4451     }
4452 
4453     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4454 
4455     auto PredForCallSite = [&](AbstractCallSite ACS) {
4456       const IRPosition &ACSArgPos =
4457           IRPosition::callsite_argument(ACS, getArgNo());
4458       // Check if a coresponding argument was found or if it is on not
4459       // associated (which can happen for callback calls).
4460       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4461         return false;
4462 
4463       // We can only propagate thread independent values through callbacks.
4464       // This is different to direct/indirect call sites because for them we
4465       // know the thread executing the caller and callee is the same. For
4466       // callbacks this is not guaranteed, thus a thread dependent value could
4467       // be different for the caller and callee, making it invalid to propagate.
4468       Value &ArgOp = ACSArgPos.getAssociatedValue();
4469       if (ACS.isCallbackCall())
4470         if (auto *C = dyn_cast<Constant>(&ArgOp))
4471           if (C->isThreadDependent())
4472             return false;
4473       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4474     };
4475 
4476     bool AllCallSitesKnown;
4477     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4478                                 AllCallSitesKnown))
4479       if (!askSimplifiedValueForAAValueConstantRange(A))
4480         return indicatePessimisticFixpoint();
4481 
4482     // If a candicate was found in this update, return CHANGED.
4483     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4484                ? ChangeStatus::UNCHANGED
4485                : ChangeStatus ::CHANGED;
4486   }
4487 
4488   /// See AbstractAttribute::trackStatistics()
4489   void trackStatistics() const override {
4490     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4491   }
4492 };
4493 
4494 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4495   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4496       : AAValueSimplifyImpl(IRP, A) {}
4497 
4498   /// See AbstractAttribute::updateImpl(...).
4499   ChangeStatus updateImpl(Attributor &A) override {
4500     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4501 
4502     auto PredForReturned = [&](Value &V) {
4503       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4504     };
4505 
4506     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4507       if (!askSimplifiedValueForAAValueConstantRange(A))
4508         return indicatePessimisticFixpoint();
4509 
4510     // If a candicate was found in this update, return CHANGED.
4511     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4512                ? ChangeStatus::UNCHANGED
4513                : ChangeStatus ::CHANGED;
4514   }
4515 
4516   ChangeStatus manifest(Attributor &A) override {
4517     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4518 
4519     if (SimplifiedAssociatedValue.hasValue() &&
4520         !SimplifiedAssociatedValue.getValue())
4521       return Changed;
4522 
4523     Value &V = getAssociatedValue();
4524     auto *C = SimplifiedAssociatedValue.hasValue()
4525                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4526                   : UndefValue::get(V.getType());
4527     if (C) {
4528       auto PredForReturned =
4529           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4530             // We can replace the AssociatedValue with the constant.
4531             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4532               return true;
4533 
4534             for (ReturnInst *RI : RetInsts) {
4535               if (RI->getFunction() != getAnchorScope())
4536                 continue;
4537               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4538                                 << " in " << *RI << " :: " << *this << "\n");
4539               if (A.changeUseAfterManifest(RI->getOperandUse(0), *C))
4540                 Changed = ChangeStatus::CHANGED;
4541             }
4542             return true;
4543           };
4544       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4545     }
4546 
4547     return Changed | AAValueSimplify::manifest(A);
4548   }
4549 
4550   /// See AbstractAttribute::trackStatistics()
4551   void trackStatistics() const override {
4552     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4553   }
4554 };
4555 
4556 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4557   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4558       : AAValueSimplifyImpl(IRP, A) {}
4559 
4560   /// See AbstractAttribute::initialize(...).
4561   void initialize(Attributor &A) override {
4562     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4563     //        Needs investigation.
4564     // AAValueSimplifyImpl::initialize(A);
4565     Value &V = getAnchorValue();
4566 
4567     // TODO: add other stuffs
4568     if (isa<Constant>(V))
4569       indicatePessimisticFixpoint();
4570   }
4571 
4572   /// See AbstractAttribute::updateImpl(...).
4573   ChangeStatus updateImpl(Attributor &A) override {
4574     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4575 
4576     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4577                             bool Stripped) -> bool {
4578       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4579       if (!Stripped && this == &AA) {
4580         // TODO: Look the instruction and check recursively.
4581 
4582         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4583                           << "\n");
4584         return false;
4585       }
4586       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4587     };
4588 
4589     bool Dummy = false;
4590     if (!genericValueTraversal<AAValueSimplify, bool>(
4591             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI()))
4592       if (!askSimplifiedValueForAAValueConstantRange(A))
4593         return indicatePessimisticFixpoint();
4594 
4595     // If a candicate was found in this update, return CHANGED.
4596 
4597     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4598                ? ChangeStatus::UNCHANGED
4599                : ChangeStatus ::CHANGED;
4600   }
4601 
4602   /// See AbstractAttribute::trackStatistics()
4603   void trackStatistics() const override {
4604     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4605   }
4606 };
4607 
4608 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4609   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4610       : AAValueSimplifyImpl(IRP, A) {}
4611 
4612   /// See AbstractAttribute::initialize(...).
4613   void initialize(Attributor &A) override {
4614     SimplifiedAssociatedValue = &getAnchorValue();
4615     indicateOptimisticFixpoint();
4616   }
4617   /// See AbstractAttribute::initialize(...).
4618   ChangeStatus updateImpl(Attributor &A) override {
4619     llvm_unreachable(
4620         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4621   }
4622   /// See AbstractAttribute::trackStatistics()
4623   void trackStatistics() const override {
4624     STATS_DECLTRACK_FN_ATTR(value_simplify)
4625   }
4626 };
4627 
4628 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4629   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4630       : AAValueSimplifyFunction(IRP, A) {}
4631   /// See AbstractAttribute::trackStatistics()
4632   void trackStatistics() const override {
4633     STATS_DECLTRACK_CS_ATTR(value_simplify)
4634   }
4635 };
4636 
4637 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4638   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4639       : AAValueSimplifyReturned(IRP, A) {}
4640 
4641   /// See AbstractAttribute::manifest(...).
4642   ChangeStatus manifest(Attributor &A) override {
4643     return AAValueSimplifyImpl::manifest(A);
4644   }
4645 
4646   void trackStatistics() const override {
4647     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4648   }
4649 };
4650 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4651   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4652       : AAValueSimplifyFloating(IRP, A) {}
4653 
4654   void trackStatistics() const override {
4655     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4656   }
4657 };
4658 
4659 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4660 struct AAHeapToStackImpl : public AAHeapToStack {
4661   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4662       : AAHeapToStack(IRP, A) {}
4663 
4664   const std::string getAsStr() const override {
4665     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4666   }
4667 
4668   ChangeStatus manifest(Attributor &A) override {
4669     assert(getState().isValidState() &&
4670            "Attempted to manifest an invalid state!");
4671 
4672     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4673     Function *F = getAnchorScope();
4674     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4675 
4676     for (Instruction *MallocCall : MallocCalls) {
4677       // This malloc cannot be replaced.
4678       if (BadMallocCalls.count(MallocCall))
4679         continue;
4680 
4681       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4682         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4683         A.deleteAfterManifest(*FreeCall);
4684         HasChanged = ChangeStatus::CHANGED;
4685       }
4686 
4687       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4688                         << "\n");
4689 
4690       MaybeAlign Alignment;
4691       Constant *Size;
4692       if (isCallocLikeFn(MallocCall, TLI)) {
4693         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4694         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4695         APInt TotalSize = SizeT->getValue() * Num->getValue();
4696         Size =
4697             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4698       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4699         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4700         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4701                                    ->getValue()
4702                                    .getZExtValue());
4703       } else {
4704         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4705       }
4706 
4707       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4708       Instruction *AI =
4709           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4710                          "", MallocCall->getNextNode());
4711 
4712       if (AI->getType() != MallocCall->getType())
4713         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4714                              AI->getNextNode());
4715 
4716       A.changeValueAfterManifest(*MallocCall, *AI);
4717 
4718       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4719         auto *NBB = II->getNormalDest();
4720         BranchInst::Create(NBB, MallocCall->getParent());
4721         A.deleteAfterManifest(*MallocCall);
4722       } else {
4723         A.deleteAfterManifest(*MallocCall);
4724       }
4725 
4726       // Zero out the allocated memory if it was a calloc.
4727       if (isCallocLikeFn(MallocCall, TLI)) {
4728         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4729                                    AI->getNextNode());
4730         Value *Ops[] = {
4731             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4732             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4733 
4734         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4735         Module *M = F->getParent();
4736         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4737         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4738       }
4739       HasChanged = ChangeStatus::CHANGED;
4740     }
4741 
4742     return HasChanged;
4743   }
4744 
4745   /// Collection of all malloc calls in a function.
4746   SmallSetVector<Instruction *, 4> MallocCalls;
4747 
4748   /// Collection of malloc calls that cannot be converted.
4749   DenseSet<const Instruction *> BadMallocCalls;
4750 
4751   /// A map for each malloc call to the set of associated free calls.
4752   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4753 
4754   ChangeStatus updateImpl(Attributor &A) override;
4755 };
4756 
4757 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4758   const Function *F = getAnchorScope();
4759   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4760 
4761   MustBeExecutedContextExplorer &Explorer =
4762       A.getInfoCache().getMustBeExecutedContextExplorer();
4763 
4764   auto FreeCheck = [&](Instruction &I) {
4765     const auto &Frees = FreesForMalloc.lookup(&I);
4766     if (Frees.size() != 1)
4767       return false;
4768     Instruction *UniqueFree = *Frees.begin();
4769     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4770   };
4771 
4772   auto UsesCheck = [&](Instruction &I) {
4773     bool ValidUsesOnly = true;
4774     bool MustUse = true;
4775     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4776       Instruction *UserI = cast<Instruction>(U.getUser());
4777       if (isa<LoadInst>(UserI))
4778         return true;
4779       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4780         if (SI->getValueOperand() == U.get()) {
4781           LLVM_DEBUG(dbgs()
4782                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4783           ValidUsesOnly = false;
4784         } else {
4785           // A store into the malloc'ed memory is fine.
4786         }
4787         return true;
4788       }
4789       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4790         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4791           return true;
4792         // Record malloc.
4793         if (isFreeCall(UserI, TLI)) {
4794           if (MustUse) {
4795             FreesForMalloc[&I].insert(UserI);
4796           } else {
4797             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4798                               << *UserI << "\n");
4799             ValidUsesOnly = false;
4800           }
4801           return true;
4802         }
4803 
4804         unsigned ArgNo = CB->getArgOperandNo(&U);
4805 
4806         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4807             *this, IRPosition::callsite_argument(*CB, ArgNo));
4808 
4809         // If a callsite argument use is nofree, we are fine.
4810         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4811             *this, IRPosition::callsite_argument(*CB, ArgNo));
4812 
4813         if (!NoCaptureAA.isAssumedNoCapture() ||
4814             !ArgNoFreeAA.isAssumedNoFree()) {
4815           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4816           ValidUsesOnly = false;
4817         }
4818         return true;
4819       }
4820 
4821       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4822           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4823         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4824         Follow = true;
4825         return true;
4826       }
4827       // Unknown user for which we can not track uses further (in a way that
4828       // makes sense).
4829       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4830       ValidUsesOnly = false;
4831       return true;
4832     };
4833     A.checkForAllUses(Pred, *this, I);
4834     return ValidUsesOnly;
4835   };
4836 
4837   auto MallocCallocCheck = [&](Instruction &I) {
4838     if (BadMallocCalls.count(&I))
4839       return true;
4840 
4841     bool IsMalloc = isMallocLikeFn(&I, TLI);
4842     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
4843     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4844     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
4845       BadMallocCalls.insert(&I);
4846       return true;
4847     }
4848 
4849     if (IsMalloc) {
4850       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4851         if (Size->getValue().ule(MaxHeapToStackSize))
4852           if (UsesCheck(I) || FreeCheck(I)) {
4853             MallocCalls.insert(&I);
4854             return true;
4855           }
4856     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
4857       // Only if the alignment and sizes are constant.
4858       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4859         if (Size->getValue().ule(MaxHeapToStackSize))
4860           if (UsesCheck(I) || FreeCheck(I)) {
4861             MallocCalls.insert(&I);
4862             return true;
4863           }
4864     } else if (IsCalloc) {
4865       bool Overflow = false;
4866       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4867         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4868           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4869                   .ule(MaxHeapToStackSize))
4870             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4871               MallocCalls.insert(&I);
4872               return true;
4873             }
4874     }
4875 
4876     BadMallocCalls.insert(&I);
4877     return true;
4878   };
4879 
4880   size_t NumBadMallocs = BadMallocCalls.size();
4881 
4882   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4883 
4884   if (NumBadMallocs != BadMallocCalls.size())
4885     return ChangeStatus::CHANGED;
4886 
4887   return ChangeStatus::UNCHANGED;
4888 }
4889 
4890 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4891   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
4892       : AAHeapToStackImpl(IRP, A) {}
4893 
4894   /// See AbstractAttribute::trackStatistics().
4895   void trackStatistics() const override {
4896     STATS_DECL(
4897         MallocCalls, Function,
4898         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
4899     for (auto *C : MallocCalls)
4900       if (!BadMallocCalls.count(C))
4901         ++BUILD_STAT_NAME(MallocCalls, Function);
4902   }
4903 };
4904 
4905 /// ----------------------- Privatizable Pointers ------------------------------
4906 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
4907   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
4908       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
4909 
4910   ChangeStatus indicatePessimisticFixpoint() override {
4911     AAPrivatizablePtr::indicatePessimisticFixpoint();
4912     PrivatizableType = nullptr;
4913     return ChangeStatus::CHANGED;
4914   }
4915 
4916   /// Identify the type we can chose for a private copy of the underlying
4917   /// argument. None means it is not clear yet, nullptr means there is none.
4918   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
4919 
4920   /// Return a privatizable type that encloses both T0 and T1.
4921   /// TODO: This is merely a stub for now as we should manage a mapping as well.
4922   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
4923     if (!T0.hasValue())
4924       return T1;
4925     if (!T1.hasValue())
4926       return T0;
4927     if (T0 == T1)
4928       return T0;
4929     return nullptr;
4930   }
4931 
4932   Optional<Type *> getPrivatizableType() const override {
4933     return PrivatizableType;
4934   }
4935 
4936   const std::string getAsStr() const override {
4937     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
4938   }
4939 
4940 protected:
4941   Optional<Type *> PrivatizableType;
4942 };
4943 
4944 // TODO: Do this for call site arguments (probably also other values) as well.
4945 
4946 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
4947   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
4948       : AAPrivatizablePtrImpl(IRP, A) {}
4949 
4950   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
4951   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
4952     // If this is a byval argument and we know all the call sites (so we can
4953     // rewrite them), there is no need to check them explicitly.
4954     bool AllCallSitesKnown;
4955     if (getIRPosition().hasAttr(Attribute::ByVal) &&
4956         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
4957                                true, AllCallSitesKnown))
4958       return getAssociatedValue().getType()->getPointerElementType();
4959 
4960     Optional<Type *> Ty;
4961     unsigned ArgNo = getIRPosition().getArgNo();
4962 
4963     // Make sure the associated call site argument has the same type at all call
4964     // sites and it is an allocation we know is safe to privatize, for now that
4965     // means we only allow alloca instructions.
4966     // TODO: We can additionally analyze the accesses in the callee to  create
4967     //       the type from that information instead. That is a little more
4968     //       involved and will be done in a follow up patch.
4969     auto CallSiteCheck = [&](AbstractCallSite ACS) {
4970       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
4971       // Check if a coresponding argument was found or if it is one not
4972       // associated (which can happen for callback calls).
4973       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4974         return false;
4975 
4976       // Check that all call sites agree on a type.
4977       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
4978       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
4979 
4980       LLVM_DEBUG({
4981         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
4982         if (CSTy.hasValue() && CSTy.getValue())
4983           CSTy.getValue()->print(dbgs());
4984         else if (CSTy.hasValue())
4985           dbgs() << "<nullptr>";
4986         else
4987           dbgs() << "<none>";
4988       });
4989 
4990       Ty = combineTypes(Ty, CSTy);
4991 
4992       LLVM_DEBUG({
4993         dbgs() << " : New Type: ";
4994         if (Ty.hasValue() && Ty.getValue())
4995           Ty.getValue()->print(dbgs());
4996         else if (Ty.hasValue())
4997           dbgs() << "<nullptr>";
4998         else
4999           dbgs() << "<none>";
5000         dbgs() << "\n";
5001       });
5002 
5003       return !Ty.hasValue() || Ty.getValue();
5004     };
5005 
5006     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5007       return nullptr;
5008     return Ty;
5009   }
5010 
5011   /// See AbstractAttribute::updateImpl(...).
5012   ChangeStatus updateImpl(Attributor &A) override {
5013     PrivatizableType = identifyPrivatizableType(A);
5014     if (!PrivatizableType.hasValue())
5015       return ChangeStatus::UNCHANGED;
5016     if (!PrivatizableType.getValue())
5017       return indicatePessimisticFixpoint();
5018 
5019     // Avoid arguments with padding for now.
5020     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5021         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5022                                                 A.getInfoCache().getDL())) {
5023       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5024       return indicatePessimisticFixpoint();
5025     }
5026 
5027     // Verify callee and caller agree on how the promoted argument would be
5028     // passed.
5029     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5030     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5031     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5032     Function &Fn = *getIRPosition().getAnchorScope();
5033     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5034     ArgsToPromote.insert(getAssociatedArgument());
5035     const auto *TTI =
5036         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5037     if (!TTI ||
5038         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5039             Fn, *TTI, ArgsToPromote, Dummy) ||
5040         ArgsToPromote.empty()) {
5041       LLVM_DEBUG(
5042           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5043                  << Fn.getName() << "\n");
5044       return indicatePessimisticFixpoint();
5045     }
5046 
5047     // Collect the types that will replace the privatizable type in the function
5048     // signature.
5049     SmallVector<Type *, 16> ReplacementTypes;
5050     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5051 
5052     // Register a rewrite of the argument.
5053     Argument *Arg = getAssociatedArgument();
5054     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5055       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5056       return indicatePessimisticFixpoint();
5057     }
5058 
5059     unsigned ArgNo = Arg->getArgNo();
5060 
5061     // Helper to check if for the given call site the associated argument is
5062     // passed to a callback where the privatization would be different.
5063     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5064       SmallVector<const Use *, 4> CallbackUses;
5065       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5066       for (const Use *U : CallbackUses) {
5067         AbstractCallSite CBACS(U);
5068         assert(CBACS && CBACS.isCallbackCall());
5069         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5070           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5071 
5072           LLVM_DEBUG({
5073             dbgs()
5074                 << "[AAPrivatizablePtr] Argument " << *Arg
5075                 << "check if can be privatized in the context of its parent ("
5076                 << Arg->getParent()->getName()
5077                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5078                    "callback ("
5079                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5080                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5081                 << CBACS.getCallArgOperand(CBArg) << " vs "
5082                 << CB.getArgOperand(ArgNo) << "\n"
5083                 << "[AAPrivatizablePtr] " << CBArg << " : "
5084                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5085           });
5086 
5087           if (CBArgNo != int(ArgNo))
5088             continue;
5089           const auto &CBArgPrivAA =
5090               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5091           if (CBArgPrivAA.isValidState()) {
5092             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5093             if (!CBArgPrivTy.hasValue())
5094               continue;
5095             if (CBArgPrivTy.getValue() == PrivatizableType)
5096               continue;
5097           }
5098 
5099           LLVM_DEBUG({
5100             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5101                    << " cannot be privatized in the context of its parent ("
5102                    << Arg->getParent()->getName()
5103                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5104                       "callback ("
5105                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5106                    << ").\n[AAPrivatizablePtr] for which the argument "
5107                       "privatization is not compatible.\n";
5108           });
5109           return false;
5110         }
5111       }
5112       return true;
5113     };
5114 
5115     // Helper to check if for the given call site the associated argument is
5116     // passed to a direct call where the privatization would be different.
5117     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5118       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5119       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5120       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5121              "Expected a direct call operand for callback call operand");
5122 
5123       LLVM_DEBUG({
5124         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5125                << " check if be privatized in the context of its parent ("
5126                << Arg->getParent()->getName()
5127                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5128                   "direct call of ("
5129                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5130                << ").\n";
5131       });
5132 
5133       Function *DCCallee = DC->getCalledFunction();
5134       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5135         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5136             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5137         if (DCArgPrivAA.isValidState()) {
5138           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5139           if (!DCArgPrivTy.hasValue())
5140             return true;
5141           if (DCArgPrivTy.getValue() == PrivatizableType)
5142             return true;
5143         }
5144       }
5145 
5146       LLVM_DEBUG({
5147         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5148                << " cannot be privatized in the context of its parent ("
5149                << Arg->getParent()->getName()
5150                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5151                   "direct call of ("
5152                << ACS.getInstruction()->getCalledFunction()->getName()
5153                << ").\n[AAPrivatizablePtr] for which the argument "
5154                   "privatization is not compatible.\n";
5155       });
5156       return false;
5157     };
5158 
5159     // Helper to check if the associated argument is used at the given abstract
5160     // call site in a way that is incompatible with the privatization assumed
5161     // here.
5162     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5163       if (ACS.isDirectCall())
5164         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5165       if (ACS.isCallbackCall())
5166         return IsCompatiblePrivArgOfDirectCS(ACS);
5167       return false;
5168     };
5169 
5170     bool AllCallSitesKnown;
5171     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5172                                 AllCallSitesKnown))
5173       return indicatePessimisticFixpoint();
5174 
5175     return ChangeStatus::UNCHANGED;
5176   }
5177 
5178   /// Given a type to private \p PrivType, collect the constituates (which are
5179   /// used) in \p ReplacementTypes.
5180   static void
5181   identifyReplacementTypes(Type *PrivType,
5182                            SmallVectorImpl<Type *> &ReplacementTypes) {
5183     // TODO: For now we expand the privatization type to the fullest which can
5184     //       lead to dead arguments that need to be removed later.
5185     assert(PrivType && "Expected privatizable type!");
5186 
5187     // Traverse the type, extract constituate types on the outermost level.
5188     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5189       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5190         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5191     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5192       ReplacementTypes.append(PrivArrayType->getNumElements(),
5193                               PrivArrayType->getElementType());
5194     } else {
5195       ReplacementTypes.push_back(PrivType);
5196     }
5197   }
5198 
5199   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5200   /// The values needed are taken from the arguments of \p F starting at
5201   /// position \p ArgNo.
5202   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5203                                    unsigned ArgNo, Instruction &IP) {
5204     assert(PrivType && "Expected privatizable type!");
5205 
5206     IRBuilder<NoFolder> IRB(&IP);
5207     const DataLayout &DL = F.getParent()->getDataLayout();
5208 
5209     // Traverse the type, build GEPs and stores.
5210     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5211       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5212       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5213         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5214         Value *Ptr = constructPointer(
5215             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5216         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5217       }
5218     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5219       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5220       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5221       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5222         Value *Ptr =
5223             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5224         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5225       }
5226     } else {
5227       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5228     }
5229   }
5230 
5231   /// Extract values from \p Base according to the type \p PrivType at the
5232   /// call position \p ACS. The values are appended to \p ReplacementValues.
5233   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5234                                Value *Base,
5235                                SmallVectorImpl<Value *> &ReplacementValues) {
5236     assert(Base && "Expected base value!");
5237     assert(PrivType && "Expected privatizable type!");
5238     Instruction *IP = ACS.getInstruction();
5239 
5240     IRBuilder<NoFolder> IRB(IP);
5241     const DataLayout &DL = IP->getModule()->getDataLayout();
5242 
5243     if (Base->getType()->getPointerElementType() != PrivType)
5244       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5245                                                  "", ACS.getInstruction());
5246 
5247     // TODO: Improve the alignment of the loads.
5248     // Traverse the type, build GEPs and loads.
5249     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5250       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5251       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5252         Type *PointeeTy = PrivStructType->getElementType(u);
5253         Value *Ptr =
5254             constructPointer(PointeeTy->getPointerTo(), Base,
5255                              PrivStructLayout->getElementOffset(u), IRB, DL);
5256         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5257         L->setAlignment(Align(1));
5258         ReplacementValues.push_back(L);
5259       }
5260     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5261       Type *PointeeTy = PrivArrayType->getElementType();
5262       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5263       Type *PointeePtrTy = PointeeTy->getPointerTo();
5264       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5265         Value *Ptr =
5266             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5267         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5268         L->setAlignment(Align(1));
5269         ReplacementValues.push_back(L);
5270       }
5271     } else {
5272       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5273       L->setAlignment(Align(1));
5274       ReplacementValues.push_back(L);
5275     }
5276   }
5277 
5278   /// See AbstractAttribute::manifest(...)
5279   ChangeStatus manifest(Attributor &A) override {
5280     if (!PrivatizableType.hasValue())
5281       return ChangeStatus::UNCHANGED;
5282     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5283 
5284     // Collect all tail calls in the function as we cannot allow new allocas to
5285     // escape into tail recursion.
5286     // TODO: Be smarter about new allocas escaping into tail calls.
5287     SmallVector<CallInst *, 16> TailCalls;
5288     if (!A.checkForAllInstructions(
5289             [&](Instruction &I) {
5290               CallInst &CI = cast<CallInst>(I);
5291               if (CI.isTailCall())
5292                 TailCalls.push_back(&CI);
5293               return true;
5294             },
5295             *this, {Instruction::Call}))
5296       return ChangeStatus::UNCHANGED;
5297 
5298     Argument *Arg = getAssociatedArgument();
5299 
5300     // Callback to repair the associated function. A new alloca is placed at the
5301     // beginning and initialized with the values passed through arguments. The
5302     // new alloca replaces the use of the old pointer argument.
5303     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5304         [=](const Attributor::ArgumentReplacementInfo &ARI,
5305             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5306           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5307           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5308           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5309                                     Arg->getName() + ".priv", IP);
5310           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5311                                ArgIt->getArgNo(), *IP);
5312           Arg->replaceAllUsesWith(AI);
5313 
5314           for (CallInst *CI : TailCalls)
5315             CI->setTailCall(false);
5316         };
5317 
5318     // Callback to repair a call site of the associated function. The elements
5319     // of the privatizable type are loaded prior to the call and passed to the
5320     // new function version.
5321     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5322         [=](const Attributor::ArgumentReplacementInfo &ARI,
5323             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
5324           createReplacementValues(
5325               PrivatizableType.getValue(), ACS,
5326               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5327               NewArgOperands);
5328         };
5329 
5330     // Collect the types that will replace the privatizable type in the function
5331     // signature.
5332     SmallVector<Type *, 16> ReplacementTypes;
5333     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5334 
5335     // Register a rewrite of the argument.
5336     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5337                                            std::move(FnRepairCB),
5338                                            std::move(ACSRepairCB)))
5339       return ChangeStatus::CHANGED;
5340     return ChangeStatus::UNCHANGED;
5341   }
5342 
5343   /// See AbstractAttribute::trackStatistics()
5344   void trackStatistics() const override {
5345     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5346   }
5347 };
5348 
5349 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5350   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5351       : AAPrivatizablePtrImpl(IRP, A) {}
5352 
5353   /// See AbstractAttribute::initialize(...).
5354   virtual void initialize(Attributor &A) override {
5355     // TODO: We can privatize more than arguments.
5356     indicatePessimisticFixpoint();
5357   }
5358 
5359   ChangeStatus updateImpl(Attributor &A) override {
5360     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5361                      "updateImpl will not be called");
5362   }
5363 
5364   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5365   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5366     Value *Obj =
5367         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5368     if (!Obj) {
5369       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5370       return nullptr;
5371     }
5372 
5373     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5374       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5375         if (CI->isOne())
5376           return Obj->getType()->getPointerElementType();
5377     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5378       auto &PrivArgAA =
5379           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5380       if (PrivArgAA.isAssumedPrivatizablePtr())
5381         return Obj->getType()->getPointerElementType();
5382     }
5383 
5384     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5385                          "alloca nor privatizable argument: "
5386                       << *Obj << "!\n");
5387     return nullptr;
5388   }
5389 
5390   /// See AbstractAttribute::trackStatistics()
5391   void trackStatistics() const override {
5392     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5393   }
5394 };
5395 
5396 struct AAPrivatizablePtrCallSiteArgument final
5397     : public AAPrivatizablePtrFloating {
5398   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5399       : AAPrivatizablePtrFloating(IRP, A) {}
5400 
5401   /// See AbstractAttribute::initialize(...).
5402   void initialize(Attributor &A) override {
5403     if (getIRPosition().hasAttr(Attribute::ByVal))
5404       indicateOptimisticFixpoint();
5405   }
5406 
5407   /// See AbstractAttribute::updateImpl(...).
5408   ChangeStatus updateImpl(Attributor &A) override {
5409     PrivatizableType = identifyPrivatizableType(A);
5410     if (!PrivatizableType.hasValue())
5411       return ChangeStatus::UNCHANGED;
5412     if (!PrivatizableType.getValue())
5413       return indicatePessimisticFixpoint();
5414 
5415     const IRPosition &IRP = getIRPosition();
5416     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5417     if (!NoCaptureAA.isAssumedNoCapture()) {
5418       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5419       return indicatePessimisticFixpoint();
5420     }
5421 
5422     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5423     if (!NoAliasAA.isAssumedNoAlias()) {
5424       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5425       return indicatePessimisticFixpoint();
5426     }
5427 
5428     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5429     if (!MemBehaviorAA.isAssumedReadOnly()) {
5430       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5431       return indicatePessimisticFixpoint();
5432     }
5433 
5434     return ChangeStatus::UNCHANGED;
5435   }
5436 
5437   /// See AbstractAttribute::trackStatistics()
5438   void trackStatistics() const override {
5439     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5440   }
5441 };
5442 
5443 struct AAPrivatizablePtrCallSiteReturned final
5444     : public AAPrivatizablePtrFloating {
5445   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5446       : AAPrivatizablePtrFloating(IRP, A) {}
5447 
5448   /// See AbstractAttribute::initialize(...).
5449   void initialize(Attributor &A) override {
5450     // TODO: We can privatize more than arguments.
5451     indicatePessimisticFixpoint();
5452   }
5453 
5454   /// See AbstractAttribute::trackStatistics()
5455   void trackStatistics() const override {
5456     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5457   }
5458 };
5459 
5460 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5461   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5462       : AAPrivatizablePtrFloating(IRP, A) {}
5463 
5464   /// See AbstractAttribute::initialize(...).
5465   void initialize(Attributor &A) override {
5466     // TODO: We can privatize more than arguments.
5467     indicatePessimisticFixpoint();
5468   }
5469 
5470   /// See AbstractAttribute::trackStatistics()
5471   void trackStatistics() const override {
5472     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5473   }
5474 };
5475 
5476 /// -------------------- Memory Behavior Attributes ----------------------------
5477 /// Includes read-none, read-only, and write-only.
5478 /// ----------------------------------------------------------------------------
5479 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5480   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5481       : AAMemoryBehavior(IRP, A) {}
5482 
5483   /// See AbstractAttribute::initialize(...).
5484   void initialize(Attributor &A) override {
5485     intersectAssumedBits(BEST_STATE);
5486     getKnownStateFromValue(getIRPosition(), getState());
5487     IRAttribute::initialize(A);
5488   }
5489 
5490   /// Return the memory behavior information encoded in the IR for \p IRP.
5491   static void getKnownStateFromValue(const IRPosition &IRP,
5492                                      BitIntegerState &State,
5493                                      bool IgnoreSubsumingPositions = false) {
5494     SmallVector<Attribute, 2> Attrs;
5495     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5496     for (const Attribute &Attr : Attrs) {
5497       switch (Attr.getKindAsEnum()) {
5498       case Attribute::ReadNone:
5499         State.addKnownBits(NO_ACCESSES);
5500         break;
5501       case Attribute::ReadOnly:
5502         State.addKnownBits(NO_WRITES);
5503         break;
5504       case Attribute::WriteOnly:
5505         State.addKnownBits(NO_READS);
5506         break;
5507       default:
5508         llvm_unreachable("Unexpected attribute!");
5509       }
5510     }
5511 
5512     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5513       if (!I->mayReadFromMemory())
5514         State.addKnownBits(NO_READS);
5515       if (!I->mayWriteToMemory())
5516         State.addKnownBits(NO_WRITES);
5517     }
5518   }
5519 
5520   /// See AbstractAttribute::getDeducedAttributes(...).
5521   void getDeducedAttributes(LLVMContext &Ctx,
5522                             SmallVectorImpl<Attribute> &Attrs) const override {
5523     assert(Attrs.size() == 0);
5524     if (isAssumedReadNone())
5525       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5526     else if (isAssumedReadOnly())
5527       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5528     else if (isAssumedWriteOnly())
5529       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5530     assert(Attrs.size() <= 1);
5531   }
5532 
5533   /// See AbstractAttribute::manifest(...).
5534   ChangeStatus manifest(Attributor &A) override {
5535     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5536       return ChangeStatus::UNCHANGED;
5537 
5538     const IRPosition &IRP = getIRPosition();
5539 
5540     // Check if we would improve the existing attributes first.
5541     SmallVector<Attribute, 4> DeducedAttrs;
5542     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5543     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5544           return IRP.hasAttr(Attr.getKindAsEnum(),
5545                              /* IgnoreSubsumingPositions */ true);
5546         }))
5547       return ChangeStatus::UNCHANGED;
5548 
5549     // Clear existing attributes.
5550     IRP.removeAttrs(AttrKinds);
5551 
5552     // Use the generic manifest method.
5553     return IRAttribute::manifest(A);
5554   }
5555 
5556   /// See AbstractState::getAsStr().
5557   const std::string getAsStr() const override {
5558     if (isAssumedReadNone())
5559       return "readnone";
5560     if (isAssumedReadOnly())
5561       return "readonly";
5562     if (isAssumedWriteOnly())
5563       return "writeonly";
5564     return "may-read/write";
5565   }
5566 
5567   /// The set of IR attributes AAMemoryBehavior deals with.
5568   static const Attribute::AttrKind AttrKinds[3];
5569 };
5570 
5571 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5572     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5573 
5574 /// Memory behavior attribute for a floating value.
5575 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5576   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5577       : AAMemoryBehaviorImpl(IRP, A) {}
5578 
5579   /// See AbstractAttribute::initialize(...).
5580   void initialize(Attributor &A) override {
5581     AAMemoryBehaviorImpl::initialize(A);
5582     // Initialize the use vector with all direct uses of the associated value.
5583     for (const Use &U : getAssociatedValue().uses())
5584       Uses.insert(&U);
5585   }
5586 
5587   /// See AbstractAttribute::updateImpl(...).
5588   ChangeStatus updateImpl(Attributor &A) override;
5589 
5590   /// See AbstractAttribute::trackStatistics()
5591   void trackStatistics() const override {
5592     if (isAssumedReadNone())
5593       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5594     else if (isAssumedReadOnly())
5595       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5596     else if (isAssumedWriteOnly())
5597       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5598   }
5599 
5600 private:
5601   /// Return true if users of \p UserI might access the underlying
5602   /// variable/location described by \p U and should therefore be analyzed.
5603   bool followUsersOfUseIn(Attributor &A, const Use *U,
5604                           const Instruction *UserI);
5605 
5606   /// Update the state according to the effect of use \p U in \p UserI.
5607   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5608 
5609 protected:
5610   /// Container for (transitive) uses of the associated argument.
5611   SetVector<const Use *> Uses;
5612 };
5613 
5614 /// Memory behavior attribute for function argument.
5615 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5616   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5617       : AAMemoryBehaviorFloating(IRP, A) {}
5618 
5619   /// See AbstractAttribute::initialize(...).
5620   void initialize(Attributor &A) override {
5621     intersectAssumedBits(BEST_STATE);
5622     const IRPosition &IRP = getIRPosition();
5623     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5624     // can query it when we use has/getAttr. That would allow us to reuse the
5625     // initialize of the base class here.
5626     bool HasByVal =
5627         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5628     getKnownStateFromValue(IRP, getState(),
5629                            /* IgnoreSubsumingPositions */ HasByVal);
5630 
5631     // Initialize the use vector with all direct uses of the associated value.
5632     Argument *Arg = getAssociatedArgument();
5633     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5634       indicatePessimisticFixpoint();
5635     } else {
5636       // Initialize the use vector with all direct uses of the associated value.
5637       for (const Use &U : Arg->uses())
5638         Uses.insert(&U);
5639     }
5640   }
5641 
5642   ChangeStatus manifest(Attributor &A) override {
5643     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5644     if (!getAssociatedValue().getType()->isPointerTy())
5645       return ChangeStatus::UNCHANGED;
5646 
5647     // TODO: From readattrs.ll: "inalloca parameters are always
5648     //                           considered written"
5649     if (hasAttr({Attribute::InAlloca})) {
5650       removeKnownBits(NO_WRITES);
5651       removeAssumedBits(NO_WRITES);
5652     }
5653     return AAMemoryBehaviorFloating::manifest(A);
5654   }
5655 
5656   /// See AbstractAttribute::trackStatistics()
5657   void trackStatistics() const override {
5658     if (isAssumedReadNone())
5659       STATS_DECLTRACK_ARG_ATTR(readnone)
5660     else if (isAssumedReadOnly())
5661       STATS_DECLTRACK_ARG_ATTR(readonly)
5662     else if (isAssumedWriteOnly())
5663       STATS_DECLTRACK_ARG_ATTR(writeonly)
5664   }
5665 };
5666 
5667 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5668   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5669       : AAMemoryBehaviorArgument(IRP, A) {}
5670 
5671   /// See AbstractAttribute::initialize(...).
5672   void initialize(Attributor &A) override {
5673     if (Argument *Arg = getAssociatedArgument()) {
5674       if (Arg->hasByValAttr()) {
5675         addKnownBits(NO_WRITES);
5676         removeKnownBits(NO_READS);
5677         removeAssumedBits(NO_READS);
5678       }
5679     } else {
5680     }
5681     AAMemoryBehaviorArgument::initialize(A);
5682   }
5683 
5684   /// See AbstractAttribute::updateImpl(...).
5685   ChangeStatus updateImpl(Attributor &A) override {
5686     // TODO: Once we have call site specific value information we can provide
5687     //       call site specific liveness liveness information and then it makes
5688     //       sense to specialize attributes for call sites arguments instead of
5689     //       redirecting requests to the callee argument.
5690     Argument *Arg = getAssociatedArgument();
5691     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5692     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5693     return clampStateAndIndicateChange(
5694         getState(),
5695         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5696   }
5697 
5698   /// See AbstractAttribute::trackStatistics()
5699   void trackStatistics() const override {
5700     if (isAssumedReadNone())
5701       STATS_DECLTRACK_CSARG_ATTR(readnone)
5702     else if (isAssumedReadOnly())
5703       STATS_DECLTRACK_CSARG_ATTR(readonly)
5704     else if (isAssumedWriteOnly())
5705       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5706   }
5707 };
5708 
5709 /// Memory behavior attribute for a call site return position.
5710 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5711   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
5712       : AAMemoryBehaviorFloating(IRP, A) {}
5713 
5714   /// See AbstractAttribute::manifest(...).
5715   ChangeStatus manifest(Attributor &A) override {
5716     // We do not annotate returned values.
5717     return ChangeStatus::UNCHANGED;
5718   }
5719 
5720   /// See AbstractAttribute::trackStatistics()
5721   void trackStatistics() const override {}
5722 };
5723 
5724 /// An AA to represent the memory behavior function attributes.
5725 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5726   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
5727       : AAMemoryBehaviorImpl(IRP, A) {}
5728 
5729   /// See AbstractAttribute::updateImpl(Attributor &A).
5730   virtual ChangeStatus updateImpl(Attributor &A) override;
5731 
5732   /// See AbstractAttribute::manifest(...).
5733   ChangeStatus manifest(Attributor &A) override {
5734     Function &F = cast<Function>(getAnchorValue());
5735     if (isAssumedReadNone()) {
5736       F.removeFnAttr(Attribute::ArgMemOnly);
5737       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5738       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5739     }
5740     return AAMemoryBehaviorImpl::manifest(A);
5741   }
5742 
5743   /// See AbstractAttribute::trackStatistics()
5744   void trackStatistics() const override {
5745     if (isAssumedReadNone())
5746       STATS_DECLTRACK_FN_ATTR(readnone)
5747     else if (isAssumedReadOnly())
5748       STATS_DECLTRACK_FN_ATTR(readonly)
5749     else if (isAssumedWriteOnly())
5750       STATS_DECLTRACK_FN_ATTR(writeonly)
5751   }
5752 };
5753 
5754 /// AAMemoryBehavior attribute for call sites.
5755 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5756   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
5757       : AAMemoryBehaviorImpl(IRP, A) {}
5758 
5759   /// See AbstractAttribute::initialize(...).
5760   void initialize(Attributor &A) override {
5761     AAMemoryBehaviorImpl::initialize(A);
5762     Function *F = getAssociatedFunction();
5763     if (!F || !A.isFunctionIPOAmendable(*F))
5764       indicatePessimisticFixpoint();
5765   }
5766 
5767   /// See AbstractAttribute::updateImpl(...).
5768   ChangeStatus updateImpl(Attributor &A) override {
5769     // TODO: Once we have call site specific value information we can provide
5770     //       call site specific liveness liveness information and then it makes
5771     //       sense to specialize attributes for call sites arguments instead of
5772     //       redirecting requests to the callee argument.
5773     Function *F = getAssociatedFunction();
5774     const IRPosition &FnPos = IRPosition::function(*F);
5775     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5776     return clampStateAndIndicateChange(
5777         getState(),
5778         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5779   }
5780 
5781   /// See AbstractAttribute::trackStatistics()
5782   void trackStatistics() const override {
5783     if (isAssumedReadNone())
5784       STATS_DECLTRACK_CS_ATTR(readnone)
5785     else if (isAssumedReadOnly())
5786       STATS_DECLTRACK_CS_ATTR(readonly)
5787     else if (isAssumedWriteOnly())
5788       STATS_DECLTRACK_CS_ATTR(writeonly)
5789   }
5790 };
5791 
5792 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5793 
5794   // The current assumed state used to determine a change.
5795   auto AssumedState = getAssumed();
5796 
5797   auto CheckRWInst = [&](Instruction &I) {
5798     // If the instruction has an own memory behavior state, use it to restrict
5799     // the local state. No further analysis is required as the other memory
5800     // state is as optimistic as it gets.
5801     if (const auto *CB = dyn_cast<CallBase>(&I)) {
5802       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5803           *this, IRPosition::callsite_function(*CB));
5804       intersectAssumedBits(MemBehaviorAA.getAssumed());
5805       return !isAtFixpoint();
5806     }
5807 
5808     // Remove access kind modifiers if necessary.
5809     if (I.mayReadFromMemory())
5810       removeAssumedBits(NO_READS);
5811     if (I.mayWriteToMemory())
5812       removeAssumedBits(NO_WRITES);
5813     return !isAtFixpoint();
5814   };
5815 
5816   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5817     return indicatePessimisticFixpoint();
5818 
5819   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5820                                         : ChangeStatus::UNCHANGED;
5821 }
5822 
5823 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5824 
5825   const IRPosition &IRP = getIRPosition();
5826   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5827   AAMemoryBehavior::StateType &S = getState();
5828 
5829   // First, check the function scope. We take the known information and we avoid
5830   // work if the assumed information implies the current assumed information for
5831   // this attribute. This is a valid for all but byval arguments.
5832   Argument *Arg = IRP.getAssociatedArgument();
5833   AAMemoryBehavior::base_t FnMemAssumedState =
5834       AAMemoryBehavior::StateType::getWorstState();
5835   if (!Arg || !Arg->hasByValAttr()) {
5836     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5837         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5838     FnMemAssumedState = FnMemAA.getAssumed();
5839     S.addKnownBits(FnMemAA.getKnown());
5840     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5841       return ChangeStatus::UNCHANGED;
5842   }
5843 
5844   // Make sure the value is not captured (except through "return"), if
5845   // it is, any information derived would be irrelevant anyway as we cannot
5846   // check the potential aliases introduced by the capture. However, no need
5847   // to fall back to anythign less optimistic than the function state.
5848   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5849       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5850   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5851     S.intersectAssumedBits(FnMemAssumedState);
5852     return ChangeStatus::CHANGED;
5853   }
5854 
5855   // The current assumed state used to determine a change.
5856   auto AssumedState = S.getAssumed();
5857 
5858   // Liveness information to exclude dead users.
5859   // TODO: Take the FnPos once we have call site specific liveness information.
5860   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5861       *this, IRPosition::function(*IRP.getAssociatedFunction()),
5862       /* TrackDependence */ false);
5863 
5864   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5865   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5866     const Use *U = Uses[i];
5867     Instruction *UserI = cast<Instruction>(U->getUser());
5868     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5869                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
5870                       << "]\n");
5871     if (A.isAssumedDead(*U, this, &LivenessAA))
5872       continue;
5873 
5874     // Droppable users, e.g., llvm::assume does not actually perform any action.
5875     if (UserI->isDroppable())
5876       continue;
5877 
5878     // Check if the users of UserI should also be visited.
5879     if (followUsersOfUseIn(A, U, UserI))
5880       for (const Use &UserIUse : UserI->uses())
5881         Uses.insert(&UserIUse);
5882 
5883     // If UserI might touch memory we analyze the use in detail.
5884     if (UserI->mayReadOrWriteMemory())
5885       analyzeUseIn(A, U, UserI);
5886   }
5887 
5888   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5889                                         : ChangeStatus::UNCHANGED;
5890 }
5891 
5892 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5893                                                   const Instruction *UserI) {
5894   // The loaded value is unrelated to the pointer argument, no need to
5895   // follow the users of the load.
5896   if (isa<LoadInst>(UserI))
5897     return false;
5898 
5899   // By default we follow all uses assuming UserI might leak information on U,
5900   // we have special handling for call sites operands though.
5901   const auto *CB = dyn_cast<CallBase>(UserI);
5902   if (!CB || !CB->isArgOperand(U))
5903     return true;
5904 
5905   // If the use is a call argument known not to be captured, the users of
5906   // the call do not need to be visited because they have to be unrelated to
5907   // the input. Note that this check is not trivial even though we disallow
5908   // general capturing of the underlying argument. The reason is that the
5909   // call might the argument "through return", which we allow and for which we
5910   // need to check call users.
5911   if (U->get()->getType()->isPointerTy()) {
5912     unsigned ArgNo = CB->getArgOperandNo(U);
5913     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5914         *this, IRPosition::callsite_argument(*CB, ArgNo),
5915         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5916     return !ArgNoCaptureAA.isAssumedNoCapture();
5917   }
5918 
5919   return true;
5920 }
5921 
5922 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5923                                             const Instruction *UserI) {
5924   assert(UserI->mayReadOrWriteMemory());
5925 
5926   switch (UserI->getOpcode()) {
5927   default:
5928     // TODO: Handle all atomics and other side-effect operations we know of.
5929     break;
5930   case Instruction::Load:
5931     // Loads cause the NO_READS property to disappear.
5932     removeAssumedBits(NO_READS);
5933     return;
5934 
5935   case Instruction::Store:
5936     // Stores cause the NO_WRITES property to disappear if the use is the
5937     // pointer operand. Note that we do assume that capturing was taken care of
5938     // somewhere else.
5939     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5940       removeAssumedBits(NO_WRITES);
5941     return;
5942 
5943   case Instruction::Call:
5944   case Instruction::CallBr:
5945   case Instruction::Invoke: {
5946     // For call sites we look at the argument memory behavior attribute (this
5947     // could be recursive!) in order to restrict our own state.
5948     const auto *CB = cast<CallBase>(UserI);
5949 
5950     // Give up on operand bundles.
5951     if (CB->isBundleOperand(U)) {
5952       indicatePessimisticFixpoint();
5953       return;
5954     }
5955 
5956     // Calling a function does read the function pointer, maybe write it if the
5957     // function is self-modifying.
5958     if (CB->isCallee(U)) {
5959       removeAssumedBits(NO_READS);
5960       break;
5961     }
5962 
5963     // Adjust the possible access behavior based on the information on the
5964     // argument.
5965     IRPosition Pos;
5966     if (U->get()->getType()->isPointerTy())
5967       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
5968     else
5969       Pos = IRPosition::callsite_function(*CB);
5970     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5971         *this, Pos,
5972         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5973     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5974     // and at least "known".
5975     intersectAssumedBits(MemBehaviorAA.getAssumed());
5976     return;
5977   }
5978   };
5979 
5980   // Generally, look at the "may-properties" and adjust the assumed state if we
5981   // did not trigger special handling before.
5982   if (UserI->mayReadFromMemory())
5983     removeAssumedBits(NO_READS);
5984   if (UserI->mayWriteToMemory())
5985     removeAssumedBits(NO_WRITES);
5986 }
5987 
5988 } // namespace
5989 
5990 /// -------------------- Memory Locations Attributes ---------------------------
5991 /// Includes read-none, argmemonly, inaccessiblememonly,
5992 /// inaccessiblememorargmemonly
5993 /// ----------------------------------------------------------------------------
5994 
5995 std::string AAMemoryLocation::getMemoryLocationsAsStr(
5996     AAMemoryLocation::MemoryLocationsKind MLK) {
5997   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
5998     return "all memory";
5999   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6000     return "no memory";
6001   std::string S = "memory:";
6002   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6003     S += "stack,";
6004   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6005     S += "constant,";
6006   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6007     S += "internal global,";
6008   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6009     S += "external global,";
6010   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6011     S += "argument,";
6012   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6013     S += "inaccessible,";
6014   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6015     S += "malloced,";
6016   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6017     S += "unknown,";
6018   S.pop_back();
6019   return S;
6020 }
6021 
6022 namespace {
6023 struct AAMemoryLocationImpl : public AAMemoryLocation {
6024 
6025   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6026       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6027     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6028       AccessKind2Accesses[u] = nullptr;
6029   }
6030 
6031   ~AAMemoryLocationImpl() {
6032     // The AccessSets are allocated via a BumpPtrAllocator, we call
6033     // the destructor manually.
6034     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6035       if (AccessKind2Accesses[u])
6036         AccessKind2Accesses[u]->~AccessSet();
6037   }
6038 
6039   /// See AbstractAttribute::initialize(...).
6040   void initialize(Attributor &A) override {
6041     intersectAssumedBits(BEST_STATE);
6042     getKnownStateFromValue(getIRPosition(), getState());
6043     IRAttribute::initialize(A);
6044   }
6045 
6046   /// Return the memory behavior information encoded in the IR for \p IRP.
6047   static void getKnownStateFromValue(const IRPosition &IRP,
6048                                      BitIntegerState &State,
6049                                      bool IgnoreSubsumingPositions = false) {
6050     SmallVector<Attribute, 2> Attrs;
6051     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6052     for (const Attribute &Attr : Attrs) {
6053       switch (Attr.getKindAsEnum()) {
6054       case Attribute::ReadNone:
6055         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6056         break;
6057       case Attribute::InaccessibleMemOnly:
6058         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6059         break;
6060       case Attribute::ArgMemOnly:
6061         State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6062         break;
6063       case Attribute::InaccessibleMemOrArgMemOnly:
6064         State.addKnownBits(
6065             inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6066         break;
6067       default:
6068         llvm_unreachable("Unexpected attribute!");
6069       }
6070     }
6071   }
6072 
6073   /// See AbstractAttribute::getDeducedAttributes(...).
6074   void getDeducedAttributes(LLVMContext &Ctx,
6075                             SmallVectorImpl<Attribute> &Attrs) const override {
6076     assert(Attrs.size() == 0);
6077     if (isAssumedReadNone()) {
6078       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6079     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6080       if (isAssumedInaccessibleMemOnly())
6081         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6082       else if (isAssumedArgMemOnly())
6083         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6084       else if (isAssumedInaccessibleOrArgMemOnly())
6085         Attrs.push_back(
6086             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6087     }
6088     assert(Attrs.size() <= 1);
6089   }
6090 
6091   /// See AbstractAttribute::manifest(...).
6092   ChangeStatus manifest(Attributor &A) override {
6093     const IRPosition &IRP = getIRPosition();
6094 
6095     // Check if we would improve the existing attributes first.
6096     SmallVector<Attribute, 4> DeducedAttrs;
6097     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6098     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6099           return IRP.hasAttr(Attr.getKindAsEnum(),
6100                              /* IgnoreSubsumingPositions */ true);
6101         }))
6102       return ChangeStatus::UNCHANGED;
6103 
6104     // Clear existing attributes.
6105     IRP.removeAttrs(AttrKinds);
6106     if (isAssumedReadNone())
6107       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6108 
6109     // Use the generic manifest method.
6110     return IRAttribute::manifest(A);
6111   }
6112 
6113   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6114   bool checkForAllAccessesToMemoryKind(
6115       function_ref<bool(const Instruction *, const Value *, AccessKind,
6116                         MemoryLocationsKind)>
6117           Pred,
6118       MemoryLocationsKind RequestedMLK) const override {
6119     if (!isValidState())
6120       return false;
6121 
6122     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6123     if (AssumedMLK == NO_LOCATIONS)
6124       return true;
6125 
6126     unsigned Idx = 0;
6127     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6128          CurMLK *= 2, ++Idx) {
6129       if (CurMLK & RequestedMLK)
6130         continue;
6131 
6132       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6133         for (const AccessInfo &AI : *Accesses)
6134           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6135             return false;
6136     }
6137 
6138     return true;
6139   }
6140 
6141   ChangeStatus indicatePessimisticFixpoint() override {
6142     // If we give up and indicate a pessimistic fixpoint this instruction will
6143     // become an access for all potential access kinds:
6144     // TODO: Add pointers for argmemonly and globals to improve the results of
6145     //       checkForAllAccessesToMemoryKind.
6146     bool Changed = false;
6147     MemoryLocationsKind KnownMLK = getKnown();
6148     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6149     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6150       if (!(CurMLK & KnownMLK))
6151         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed);
6152     return AAMemoryLocation::indicatePessimisticFixpoint();
6153   }
6154 
6155 protected:
6156   /// Helper struct to tie together an instruction that has a read or write
6157   /// effect with the pointer it accesses (if any).
6158   struct AccessInfo {
6159 
6160     /// The instruction that caused the access.
6161     const Instruction *I;
6162 
6163     /// The base pointer that is accessed, or null if unknown.
6164     const Value *Ptr;
6165 
6166     /// The kind of access (read/write/read+write).
6167     AccessKind Kind;
6168 
6169     bool operator==(const AccessInfo &RHS) const {
6170       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6171     }
6172     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6173       if (LHS.I != RHS.I)
6174         return LHS.I < RHS.I;
6175       if (LHS.Ptr != RHS.Ptr)
6176         return LHS.Ptr < RHS.Ptr;
6177       if (LHS.Kind != RHS.Kind)
6178         return LHS.Kind < RHS.Kind;
6179       return false;
6180     }
6181   };
6182 
6183   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6184   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6185   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6186   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6187 
6188   /// Return the kind(s) of location that may be accessed by \p V.
6189   AAMemoryLocation::MemoryLocationsKind
6190   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6191 
6192   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6193   /// an access to a \p MLK memory location with the access pointer \p Ptr.
6194   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6195                                  MemoryLocationsKind MLK, const Instruction *I,
6196                                  const Value *Ptr, bool &Changed) {
6197     // TODO: The kind should be determined at the call sites based on the
6198     // information we have there.
6199     AccessKind Kind = READ_WRITE;
6200     if (I) {
6201       Kind = I->mayReadFromMemory() ? READ : NONE;
6202       Kind = AccessKind(Kind | (I->mayWriteToMemory() ? WRITE : NONE));
6203     }
6204 
6205     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6206     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6207     if (!Accesses)
6208       Accesses = new (Allocator) AccessSet();
6209     Changed |= Accesses->insert(AccessInfo{I, Ptr, Kind}).second;
6210     State.removeAssumedBits(MLK);
6211   }
6212 
6213   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6214   /// arguments, and update the state and access map accordingly.
6215   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6216                           AAMemoryLocation::StateType &State, bool &Changed);
6217 
6218   /// Used to allocate access sets.
6219   BumpPtrAllocator &Allocator;
6220 
6221   /// The set of IR attributes AAMemoryLocation deals with.
6222   static const Attribute::AttrKind AttrKinds[4];
6223 };
6224 
6225 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6226     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6227     Attribute::InaccessibleMemOrArgMemOnly};
6228 
6229 void AAMemoryLocationImpl::categorizePtrValue(
6230     Attributor &A, const Instruction &I, const Value &Ptr,
6231     AAMemoryLocation::StateType &State, bool &Changed) {
6232   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6233                     << Ptr << " ["
6234                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6235 
6236   auto StripGEPCB = [](Value *V) -> Value * {
6237     auto *GEP = dyn_cast<GEPOperator>(V);
6238     while (GEP) {
6239       V = GEP->getPointerOperand();
6240       GEP = dyn_cast<GEPOperator>(V);
6241     }
6242     return V;
6243   };
6244 
6245   auto VisitValueCB = [&](Value &V, const Instruction *,
6246                           AAMemoryLocation::StateType &T,
6247                           bool Stripped) -> bool {
6248     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6249     if (isa<UndefValue>(V))
6250       return true;
6251     if (auto *Arg = dyn_cast<Argument>(&V)) {
6252       if (Arg->hasByValAttr())
6253         updateStateAndAccessesMap(T, NO_LOCAL_MEM, &I, &V, Changed);
6254       else
6255         updateStateAndAccessesMap(T, NO_ARGUMENT_MEM, &I, &V, Changed);
6256       return true;
6257     }
6258     if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6259       if (GV->hasLocalLinkage())
6260         updateStateAndAccessesMap(T, NO_GLOBAL_INTERNAL_MEM, &I, &V, Changed);
6261       else
6262         updateStateAndAccessesMap(T, NO_GLOBAL_EXTERNAL_MEM, &I, &V, Changed);
6263       return true;
6264     }
6265     if (isa<AllocaInst>(V)) {
6266       updateStateAndAccessesMap(T, NO_LOCAL_MEM, &I, &V, Changed);
6267       return true;
6268     }
6269     if (const auto *CB = dyn_cast<CallBase>(&V)) {
6270       const auto &NoAliasAA =
6271           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6272       if (NoAliasAA.isAssumedNoAlias()) {
6273         updateStateAndAccessesMap(T, NO_MALLOCED_MEM, &I, &V, Changed);
6274         return true;
6275       }
6276     }
6277 
6278     updateStateAndAccessesMap(T, NO_UNKOWN_MEM, &I, &V, Changed);
6279     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6280                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6281                       << "\n");
6282     return true;
6283   };
6284 
6285   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6286           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6287           /* MaxValues */ 32, StripGEPCB)) {
6288     LLVM_DEBUG(
6289         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6290     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed);
6291   } else {
6292     LLVM_DEBUG(
6293         dbgs()
6294         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6295         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6296   }
6297 }
6298 
6299 AAMemoryLocation::MemoryLocationsKind
6300 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6301                                                   bool &Changed) {
6302   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6303                     << I << "\n");
6304 
6305   AAMemoryLocation::StateType AccessedLocs;
6306   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6307 
6308   if (auto *CB = dyn_cast<CallBase>(&I)) {
6309 
6310     // First check if we assume any memory is access is visible.
6311     const auto &CBMemLocationAA =
6312         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6313     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6314                       << " [" << CBMemLocationAA << "]\n");
6315 
6316     if (CBMemLocationAA.isAssumedReadNone())
6317       return NO_LOCATIONS;
6318 
6319     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6320       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6321                                 Changed);
6322       return AccessedLocs.getAssumed();
6323     }
6324 
6325     uint32_t CBAssumedNotAccessedLocs =
6326         CBMemLocationAA.getAssumedNotAccessedLocation();
6327 
6328     // Set the argmemonly and global bit as we handle them separately below.
6329     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6330         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6331 
6332     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6333       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6334         continue;
6335       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed);
6336     }
6337 
6338     // Now handle global memory if it might be accessed. This is slightly tricky
6339     // as NO_GLOBAL_MEM has multiple bits set.
6340     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6341     if (HasGlobalAccesses) {
6342       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6343                             AccessKind Kind, MemoryLocationsKind MLK) {
6344         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed);
6345         return true;
6346       };
6347       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6348               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6349         return AccessedLocs.getWorstState();
6350     }
6351 
6352     LLVM_DEBUG(
6353         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6354                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6355 
6356     // Now handle argument memory if it might be accessed.
6357     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6358     if (HasArgAccesses) {
6359       for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E;
6360            ++ArgNo) {
6361 
6362         // Skip non-pointer arguments.
6363         const Value *ArgOp = CB->getArgOperand(ArgNo);
6364         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6365           continue;
6366 
6367         // Skip readnone arguments.
6368         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
6369         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6370             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6371 
6372         if (ArgOpMemLocationAA.isAssumedReadNone())
6373           continue;
6374 
6375         // Categorize potentially accessed pointer arguments as if there was an
6376         // access instruction with them as pointer.
6377         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6378       }
6379     }
6380 
6381     LLVM_DEBUG(
6382         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6383                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6384 
6385     return AccessedLocs.getAssumed();
6386   }
6387 
6388   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6389     LLVM_DEBUG(
6390         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6391                << I << " [" << *Ptr << "]\n");
6392     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6393     return AccessedLocs.getAssumed();
6394   }
6395 
6396   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6397                     << I << "\n");
6398   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed);
6399   return AccessedLocs.getAssumed();
6400 }
6401 
6402 /// An AA to represent the memory behavior function attributes.
6403 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6404   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6405       : AAMemoryLocationImpl(IRP, A) {}
6406 
6407   /// See AbstractAttribute::updateImpl(Attributor &A).
6408   virtual ChangeStatus updateImpl(Attributor &A) override {
6409 
6410     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6411         *this, getIRPosition(), /* TrackDependence */ false);
6412     if (MemBehaviorAA.isAssumedReadNone()) {
6413       if (MemBehaviorAA.isKnownReadNone())
6414         return indicateOptimisticFixpoint();
6415       assert(isAssumedReadNone() &&
6416              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6417       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6418       return ChangeStatus::UNCHANGED;
6419     }
6420 
6421     // The current assumed state used to determine a change.
6422     auto AssumedState = getAssumed();
6423     bool Changed = false;
6424 
6425     auto CheckRWInst = [&](Instruction &I) {
6426       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6427       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6428                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6429       removeAssumedBits(inverseLocation(MLK, false, false));
6430       return true;
6431     };
6432 
6433     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6434       return indicatePessimisticFixpoint();
6435 
6436     Changed |= AssumedState != getAssumed();
6437     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6438   }
6439 
6440   /// See AbstractAttribute::trackStatistics()
6441   void trackStatistics() const override {
6442     if (isAssumedReadNone())
6443       STATS_DECLTRACK_FN_ATTR(readnone)
6444     else if (isAssumedArgMemOnly())
6445       STATS_DECLTRACK_FN_ATTR(argmemonly)
6446     else if (isAssumedInaccessibleMemOnly())
6447       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6448     else if (isAssumedInaccessibleOrArgMemOnly())
6449       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6450   }
6451 };
6452 
6453 /// AAMemoryLocation attribute for call sites.
6454 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6455   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6456       : AAMemoryLocationImpl(IRP, A) {}
6457 
6458   /// See AbstractAttribute::initialize(...).
6459   void initialize(Attributor &A) override {
6460     AAMemoryLocationImpl::initialize(A);
6461     Function *F = getAssociatedFunction();
6462     if (!F || !A.isFunctionIPOAmendable(*F))
6463       indicatePessimisticFixpoint();
6464   }
6465 
6466   /// See AbstractAttribute::updateImpl(...).
6467   ChangeStatus updateImpl(Attributor &A) override {
6468     // TODO: Once we have call site specific value information we can provide
6469     //       call site specific liveness liveness information and then it makes
6470     //       sense to specialize attributes for call sites arguments instead of
6471     //       redirecting requests to the callee argument.
6472     Function *F = getAssociatedFunction();
6473     const IRPosition &FnPos = IRPosition::function(*F);
6474     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6475     bool Changed = false;
6476     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6477                           AccessKind Kind, MemoryLocationsKind MLK) {
6478       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed);
6479       return true;
6480     };
6481     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6482       return indicatePessimisticFixpoint();
6483     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6484   }
6485 
6486   /// See AbstractAttribute::trackStatistics()
6487   void trackStatistics() const override {
6488     if (isAssumedReadNone())
6489       STATS_DECLTRACK_CS_ATTR(readnone)
6490   }
6491 };
6492 
6493 /// ------------------ Value Constant Range Attribute -------------------------
6494 
6495 struct AAValueConstantRangeImpl : AAValueConstantRange {
6496   using StateType = IntegerRangeState;
6497   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6498       : AAValueConstantRange(IRP, A) {}
6499 
6500   /// See AbstractAttribute::getAsStr().
6501   const std::string getAsStr() const override {
6502     std::string Str;
6503     llvm::raw_string_ostream OS(Str);
6504     OS << "range(" << getBitWidth() << ")<";
6505     getKnown().print(OS);
6506     OS << " / ";
6507     getAssumed().print(OS);
6508     OS << ">";
6509     return OS.str();
6510   }
6511 
6512   /// Helper function to get a SCEV expr for the associated value at program
6513   /// point \p I.
6514   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6515     if (!getAnchorScope())
6516       return nullptr;
6517 
6518     ScalarEvolution *SE =
6519         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6520             *getAnchorScope());
6521 
6522     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6523         *getAnchorScope());
6524 
6525     if (!SE || !LI)
6526       return nullptr;
6527 
6528     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6529     if (!I)
6530       return S;
6531 
6532     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6533   }
6534 
6535   /// Helper function to get a range from SCEV for the associated value at
6536   /// program point \p I.
6537   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6538                                          const Instruction *I = nullptr) const {
6539     if (!getAnchorScope())
6540       return getWorstState(getBitWidth());
6541 
6542     ScalarEvolution *SE =
6543         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6544             *getAnchorScope());
6545 
6546     const SCEV *S = getSCEV(A, I);
6547     if (!SE || !S)
6548       return getWorstState(getBitWidth());
6549 
6550     return SE->getUnsignedRange(S);
6551   }
6552 
6553   /// Helper function to get a range from LVI for the associated value at
6554   /// program point \p I.
6555   ConstantRange
6556   getConstantRangeFromLVI(Attributor &A,
6557                           const Instruction *CtxI = nullptr) const {
6558     if (!getAnchorScope())
6559       return getWorstState(getBitWidth());
6560 
6561     LazyValueInfo *LVI =
6562         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6563             *getAnchorScope());
6564 
6565     if (!LVI || !CtxI)
6566       return getWorstState(getBitWidth());
6567     return LVI->getConstantRange(&getAssociatedValue(),
6568                                  const_cast<BasicBlock *>(CtxI->getParent()),
6569                                  const_cast<Instruction *>(CtxI));
6570   }
6571 
6572   /// See AAValueConstantRange::getKnownConstantRange(..).
6573   ConstantRange
6574   getKnownConstantRange(Attributor &A,
6575                         const Instruction *CtxI = nullptr) const override {
6576     if (!CtxI || CtxI == getCtxI())
6577       return getKnown();
6578 
6579     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6580     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6581     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6582   }
6583 
6584   /// See AAValueConstantRange::getAssumedConstantRange(..).
6585   ConstantRange
6586   getAssumedConstantRange(Attributor &A,
6587                           const Instruction *CtxI = nullptr) const override {
6588     // TODO: Make SCEV use Attributor assumption.
6589     //       We may be able to bound a variable range via assumptions in
6590     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6591     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6592 
6593     if (!CtxI || CtxI == getCtxI())
6594       return getAssumed();
6595 
6596     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6597     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6598     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6599   }
6600 
6601   /// See AbstractAttribute::initialize(..).
6602   void initialize(Attributor &A) override {
6603     // Intersect a range given by SCEV.
6604     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6605 
6606     // Intersect a range given by LVI.
6607     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6608   }
6609 
6610   /// Helper function to create MDNode for range metadata.
6611   static MDNode *
6612   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6613                             const ConstantRange &AssumedConstantRange) {
6614     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6615                                   Ty, AssumedConstantRange.getLower())),
6616                               ConstantAsMetadata::get(ConstantInt::get(
6617                                   Ty, AssumedConstantRange.getUpper()))};
6618     return MDNode::get(Ctx, LowAndHigh);
6619   }
6620 
6621   /// Return true if \p Assumed is included in \p KnownRanges.
6622   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6623 
6624     if (Assumed.isFullSet())
6625       return false;
6626 
6627     if (!KnownRanges)
6628       return true;
6629 
6630     // If multiple ranges are annotated in IR, we give up to annotate assumed
6631     // range for now.
6632 
6633     // TODO:  If there exists a known range which containts assumed range, we
6634     // can say assumed range is better.
6635     if (KnownRanges->getNumOperands() > 2)
6636       return false;
6637 
6638     ConstantInt *Lower =
6639         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6640     ConstantInt *Upper =
6641         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6642 
6643     ConstantRange Known(Lower->getValue(), Upper->getValue());
6644     return Known.contains(Assumed) && Known != Assumed;
6645   }
6646 
6647   /// Helper function to set range metadata.
6648   static bool
6649   setRangeMetadataIfisBetterRange(Instruction *I,
6650                                   const ConstantRange &AssumedConstantRange) {
6651     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6652     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6653       if (!AssumedConstantRange.isEmptySet()) {
6654         I->setMetadata(LLVMContext::MD_range,
6655                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6656                                                  AssumedConstantRange));
6657         return true;
6658       }
6659     }
6660     return false;
6661   }
6662 
6663   /// See AbstractAttribute::manifest()
6664   ChangeStatus manifest(Attributor &A) override {
6665     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6666     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6667     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6668 
6669     auto &V = getAssociatedValue();
6670     if (!AssumedConstantRange.isEmptySet() &&
6671         !AssumedConstantRange.isSingleElement()) {
6672       if (Instruction *I = dyn_cast<Instruction>(&V))
6673         if (isa<CallInst>(I) || isa<LoadInst>(I))
6674           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6675             Changed = ChangeStatus::CHANGED;
6676     }
6677 
6678     return Changed;
6679   }
6680 };
6681 
6682 struct AAValueConstantRangeArgument final
6683     : AAArgumentFromCallSiteArguments<
6684           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6685   using Base = AAArgumentFromCallSiteArguments<
6686       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
6687   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
6688       : Base(IRP, A) {}
6689 
6690   /// See AbstractAttribute::initialize(..).
6691   void initialize(Attributor &A) override {
6692     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
6693       indicatePessimisticFixpoint();
6694     } else {
6695       Base::initialize(A);
6696     }
6697   }
6698 
6699   /// See AbstractAttribute::trackStatistics()
6700   void trackStatistics() const override {
6701     STATS_DECLTRACK_ARG_ATTR(value_range)
6702   }
6703 };
6704 
6705 struct AAValueConstantRangeReturned
6706     : AAReturnedFromReturnedValues<AAValueConstantRange,
6707                                    AAValueConstantRangeImpl> {
6708   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6709                                             AAValueConstantRangeImpl>;
6710   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
6711       : Base(IRP, A) {}
6712 
6713   /// See AbstractAttribute::initialize(...).
6714   void initialize(Attributor &A) override {}
6715 
6716   /// See AbstractAttribute::trackStatistics()
6717   void trackStatistics() const override {
6718     STATS_DECLTRACK_FNRET_ATTR(value_range)
6719   }
6720 };
6721 
6722 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6723   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
6724       : AAValueConstantRangeImpl(IRP, A) {}
6725 
6726   /// See AbstractAttribute::initialize(...).
6727   void initialize(Attributor &A) override {
6728     AAValueConstantRangeImpl::initialize(A);
6729     Value &V = getAssociatedValue();
6730 
6731     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6732       unionAssumed(ConstantRange(C->getValue()));
6733       indicateOptimisticFixpoint();
6734       return;
6735     }
6736 
6737     if (isa<UndefValue>(&V)) {
6738       // Collapse the undef state to 0.
6739       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6740       indicateOptimisticFixpoint();
6741       return;
6742     }
6743 
6744     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6745       return;
6746     // If it is a load instruction with range metadata, use it.
6747     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6748       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6749         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6750         return;
6751       }
6752 
6753     // We can work with PHI and select instruction as we traverse their operands
6754     // during update.
6755     if (isa<SelectInst>(V) || isa<PHINode>(V))
6756       return;
6757 
6758     // Otherwise we give up.
6759     indicatePessimisticFixpoint();
6760 
6761     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6762                       << getAssociatedValue() << "\n");
6763   }
6764 
6765   bool calculateBinaryOperator(
6766       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6767       const Instruction *CtxI,
6768       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6769     Value *LHS = BinOp->getOperand(0);
6770     Value *RHS = BinOp->getOperand(1);
6771     // TODO: Allow non integers as well.
6772     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6773       return false;
6774 
6775     auto &LHSAA =
6776         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6777     QuerriedAAs.push_back(&LHSAA);
6778     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6779 
6780     auto &RHSAA =
6781         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6782     QuerriedAAs.push_back(&RHSAA);
6783     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6784 
6785     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
6786 
6787     T.unionAssumed(AssumedRange);
6788 
6789     // TODO: Track a known state too.
6790 
6791     return T.isValidState();
6792   }
6793 
6794   bool calculateCastInst(
6795       Attributor &A, CastInst *CastI, IntegerRangeState &T,
6796       const Instruction *CtxI,
6797       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6798     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
6799     // TODO: Allow non integers as well.
6800     Value &OpV = *CastI->getOperand(0);
6801     if (!OpV.getType()->isIntegerTy())
6802       return false;
6803 
6804     auto &OpAA =
6805         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
6806     QuerriedAAs.push_back(&OpAA);
6807     T.unionAssumed(
6808         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
6809     return T.isValidState();
6810   }
6811 
6812   bool
6813   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
6814                    const Instruction *CtxI,
6815                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6816     Value *LHS = CmpI->getOperand(0);
6817     Value *RHS = CmpI->getOperand(1);
6818     // TODO: Allow non integers as well.
6819     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6820       return false;
6821 
6822     auto &LHSAA =
6823         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6824     QuerriedAAs.push_back(&LHSAA);
6825     auto &RHSAA =
6826         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6827     QuerriedAAs.push_back(&RHSAA);
6828 
6829     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6830     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6831 
6832     // If one of them is empty set, we can't decide.
6833     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
6834       return true;
6835 
6836     bool MustTrue = false, MustFalse = false;
6837 
6838     auto AllowedRegion =
6839         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
6840 
6841     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
6842         CmpI->getPredicate(), RHSAARange);
6843 
6844     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
6845       MustFalse = true;
6846 
6847     if (SatisfyingRegion.contains(LHSAARange))
6848       MustTrue = true;
6849 
6850     assert((!MustTrue || !MustFalse) &&
6851            "Either MustTrue or MustFalse should be false!");
6852 
6853     if (MustTrue)
6854       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
6855     else if (MustFalse)
6856       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
6857     else
6858       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
6859 
6860     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
6861                       << " " << RHSAA << "\n");
6862 
6863     // TODO: Track a known state too.
6864     return T.isValidState();
6865   }
6866 
6867   /// See AbstractAttribute::updateImpl(...).
6868   ChangeStatus updateImpl(Attributor &A) override {
6869     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
6870                             IntegerRangeState &T, bool Stripped) -> bool {
6871       Instruction *I = dyn_cast<Instruction>(&V);
6872       if (!I || isa<CallBase>(I)) {
6873 
6874         // If the value is not instruction, we query AA to Attributor.
6875         const auto &AA =
6876             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
6877 
6878         // Clamp operator is not used to utilize a program point CtxI.
6879         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
6880 
6881         return T.isValidState();
6882       }
6883 
6884       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
6885       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
6886         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
6887           return false;
6888       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
6889         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
6890           return false;
6891       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
6892         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
6893           return false;
6894       } else {
6895         // Give up with other instructions.
6896         // TODO: Add other instructions
6897 
6898         T.indicatePessimisticFixpoint();
6899         return false;
6900       }
6901 
6902       // Catch circular reasoning in a pessimistic way for now.
6903       // TODO: Check how the range evolves and if we stripped anything, see also
6904       //       AADereferenceable or AAAlign for similar situations.
6905       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
6906         if (QueriedAA != this)
6907           continue;
6908         // If we are in a stady state we do not need to worry.
6909         if (T.getAssumed() == getState().getAssumed())
6910           continue;
6911         T.indicatePessimisticFixpoint();
6912       }
6913 
6914       return T.isValidState();
6915     };
6916 
6917     IntegerRangeState T(getBitWidth());
6918 
6919     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
6920             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
6921       return indicatePessimisticFixpoint();
6922 
6923     return clampStateAndIndicateChange(getState(), T);
6924   }
6925 
6926   /// See AbstractAttribute::trackStatistics()
6927   void trackStatistics() const override {
6928     STATS_DECLTRACK_FLOATING_ATTR(value_range)
6929   }
6930 };
6931 
6932 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
6933   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
6934       : AAValueConstantRangeImpl(IRP, A) {}
6935 
6936   /// See AbstractAttribute::initialize(...).
6937   ChangeStatus updateImpl(Attributor &A) override {
6938     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
6939                      "not be called");
6940   }
6941 
6942   /// See AbstractAttribute::trackStatistics()
6943   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
6944 };
6945 
6946 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
6947   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
6948       : AAValueConstantRangeFunction(IRP, A) {}
6949 
6950   /// See AbstractAttribute::trackStatistics()
6951   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
6952 };
6953 
6954 struct AAValueConstantRangeCallSiteReturned
6955     : AACallSiteReturnedFromReturned<AAValueConstantRange,
6956                                      AAValueConstantRangeImpl> {
6957   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
6958       : AACallSiteReturnedFromReturned<AAValueConstantRange,
6959                                        AAValueConstantRangeImpl>(IRP, A) {}
6960 
6961   /// See AbstractAttribute::initialize(...).
6962   void initialize(Attributor &A) override {
6963     // If it is a load instruction with range metadata, use the metadata.
6964     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
6965       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
6966         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6967 
6968     AAValueConstantRangeImpl::initialize(A);
6969   }
6970 
6971   /// See AbstractAttribute::trackStatistics()
6972   void trackStatistics() const override {
6973     STATS_DECLTRACK_CSRET_ATTR(value_range)
6974   }
6975 };
6976 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
6977   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
6978       : AAValueConstantRangeFloating(IRP, A) {}
6979 
6980   /// See AbstractAttribute::trackStatistics()
6981   void trackStatistics() const override {
6982     STATS_DECLTRACK_CSARG_ATTR(value_range)
6983   }
6984 };
6985 } // namespace
6986 
6987 const char AAReturnedValues::ID = 0;
6988 const char AANoUnwind::ID = 0;
6989 const char AANoSync::ID = 0;
6990 const char AANoFree::ID = 0;
6991 const char AANonNull::ID = 0;
6992 const char AANoRecurse::ID = 0;
6993 const char AAWillReturn::ID = 0;
6994 const char AAUndefinedBehavior::ID = 0;
6995 const char AANoAlias::ID = 0;
6996 const char AAReachability::ID = 0;
6997 const char AANoReturn::ID = 0;
6998 const char AAIsDead::ID = 0;
6999 const char AADereferenceable::ID = 0;
7000 const char AAAlign::ID = 0;
7001 const char AANoCapture::ID = 0;
7002 const char AAValueSimplify::ID = 0;
7003 const char AAHeapToStack::ID = 0;
7004 const char AAPrivatizablePtr::ID = 0;
7005 const char AAMemoryBehavior::ID = 0;
7006 const char AAMemoryLocation::ID = 0;
7007 const char AAValueConstantRange::ID = 0;
7008 
7009 // Macro magic to create the static generator function for attributes that
7010 // follow the naming scheme.
7011 
7012 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
7013   case IRPosition::PK:                                                         \
7014     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
7015 
7016 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
7017   case IRPosition::PK:                                                         \
7018     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
7019     ++NumAAs;                                                                  \
7020     break;
7021 
7022 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
7023   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7024     CLASS *AA = nullptr;                                                       \
7025     switch (IRP.getPositionKind()) {                                           \
7026       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7027       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7028       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7029       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7030       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7031       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7032       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7033       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7034     }                                                                          \
7035     return *AA;                                                                \
7036   }
7037 
7038 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
7039   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7040     CLASS *AA = nullptr;                                                       \
7041     switch (IRP.getPositionKind()) {                                           \
7042       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7043       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
7044       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7045       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7046       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7047       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7048       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7049       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7050     }                                                                          \
7051     return *AA;                                                                \
7052   }
7053 
7054 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
7055   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7056     CLASS *AA = nullptr;                                                       \
7057     switch (IRP.getPositionKind()) {                                           \
7058       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7059       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7060       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7061       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7062       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7063       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7064       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7065       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7066     }                                                                          \
7067     return *AA;                                                                \
7068   }
7069 
7070 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
7071   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7072     CLASS *AA = nullptr;                                                       \
7073     switch (IRP.getPositionKind()) {                                           \
7074       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7075       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7076       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7077       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7078       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7079       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7080       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7081       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7082     }                                                                          \
7083     return *AA;                                                                \
7084   }
7085 
7086 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
7087   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7088     CLASS *AA = nullptr;                                                       \
7089     switch (IRP.getPositionKind()) {                                           \
7090       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7091       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7092       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7093       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7094       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7095       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7096       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7097       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7098     }                                                                          \
7099     return *AA;                                                                \
7100   }
7101 
7102 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
7103 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
7104 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
7105 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
7106 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
7107 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
7108 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
7109 
7110 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
7111 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7112 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
7113 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7114 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7115 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7116 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7117 
7118 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7119 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7120 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7121 
7122 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7123 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7124 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7125 
7126 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7127 
7128 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7129 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7130 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7131 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7132 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7133 #undef SWITCH_PK_CREATE
7134 #undef SWITCH_PK_INV
7135