1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/AssumeBundleQueries.h"
19 #include "llvm/Analysis/CaptureTracking.h"
20 #include "llvm/Analysis/LazyValueInfo.h"
21 #include "llvm/Analysis/MemoryBuiltins.h"
22 #include "llvm/Analysis/ScalarEvolution.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/IR/IRBuilder.h"
25 #include "llvm/IR/IntrinsicInst.h"
26 #include "llvm/IR/NoFolder.h"
27 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
28 #include "llvm/Transforms/Utils/Local.h"
29 
30 #include <cassert>
31 
32 using namespace llvm;
33 
34 #define DEBUG_TYPE "attributor"
35 
36 static cl::opt<bool> ManifestInternal(
37     "attributor-manifest-internal", cl::Hidden,
38     cl::desc("Manifest Attributor internal string attributes."),
39     cl::init(false));
40 
41 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
42                                        cl::Hidden);
43 
44 STATISTIC(NumAAs, "Number of abstract attributes created");
45 
46 // Some helper macros to deal with statistics tracking.
47 //
48 // Usage:
49 // For simple IR attribute tracking overload trackStatistics in the abstract
50 // attribute and choose the right STATS_DECLTRACK_********* macro,
51 // e.g.,:
52 //  void trackStatistics() const override {
53 //    STATS_DECLTRACK_ARG_ATTR(returned)
54 //  }
55 // If there is a single "increment" side one can use the macro
56 // STATS_DECLTRACK with a custom message. If there are multiple increment
57 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
58 //
59 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
60   ("Number of " #TYPE " marked '" #NAME "'")
61 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
62 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
63 #define STATS_DECL(NAME, TYPE, MSG)                                            \
64   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
65 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
66 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
67   {                                                                            \
68     STATS_DECL(NAME, TYPE, MSG)                                                \
69     STATS_TRACK(NAME, TYPE)                                                    \
70   }
71 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
72   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
73 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
74   STATS_DECLTRACK(NAME, CSArguments,                                           \
75                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
76 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
77   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
78 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
79   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
80 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
81   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
82                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
83 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
84   STATS_DECLTRACK(NAME, CSReturn,                                              \
85                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
86 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
87   STATS_DECLTRACK(NAME, Floating,                                              \
88                   ("Number of floating values known to be '" #NAME "'"))
89 
90 // Specialization of the operator<< for abstract attributes subclasses. This
91 // disambiguates situations where multiple operators are applicable.
92 namespace llvm {
93 #define PIPE_OPERATOR(CLASS)                                                   \
94   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
95     return OS << static_cast<const AbstractAttribute &>(AA);                   \
96   }
97 
98 PIPE_OPERATOR(AAIsDead)
99 PIPE_OPERATOR(AANoUnwind)
100 PIPE_OPERATOR(AANoSync)
101 PIPE_OPERATOR(AANoRecurse)
102 PIPE_OPERATOR(AAWillReturn)
103 PIPE_OPERATOR(AANoReturn)
104 PIPE_OPERATOR(AAReturnedValues)
105 PIPE_OPERATOR(AANonNull)
106 PIPE_OPERATOR(AANoAlias)
107 PIPE_OPERATOR(AADereferenceable)
108 PIPE_OPERATOR(AAAlign)
109 PIPE_OPERATOR(AANoCapture)
110 PIPE_OPERATOR(AAValueSimplify)
111 PIPE_OPERATOR(AANoFree)
112 PIPE_OPERATOR(AAHeapToStack)
113 PIPE_OPERATOR(AAReachability)
114 PIPE_OPERATOR(AAMemoryBehavior)
115 PIPE_OPERATOR(AAMemoryLocation)
116 PIPE_OPERATOR(AAValueConstantRange)
117 PIPE_OPERATOR(AAPrivatizablePtr)
118 PIPE_OPERATOR(AAUndefinedBehavior)
119 
120 #undef PIPE_OPERATOR
121 } // namespace llvm
122 
123 namespace {
124 
125 static Optional<ConstantInt *>
126 getAssumedConstantInt(Attributor &A, const Value &V,
127                       const AbstractAttribute &AA,
128                       bool &UsedAssumedInformation) {
129   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
130   if (C.hasValue())
131     return dyn_cast_or_null<ConstantInt>(C.getValue());
132   return llvm::None;
133 }
134 
135 /// Get pointer operand of memory accessing instruction. If \p I is
136 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
137 /// is set to false and the instruction is volatile, return nullptr.
138 static const Value *getPointerOperand(const Instruction *I,
139                                       bool AllowVolatile) {
140   if (auto *LI = dyn_cast<LoadInst>(I)) {
141     if (!AllowVolatile && LI->isVolatile())
142       return nullptr;
143     return LI->getPointerOperand();
144   }
145 
146   if (auto *SI = dyn_cast<StoreInst>(I)) {
147     if (!AllowVolatile && SI->isVolatile())
148       return nullptr;
149     return SI->getPointerOperand();
150   }
151 
152   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
153     if (!AllowVolatile && CXI->isVolatile())
154       return nullptr;
155     return CXI->getPointerOperand();
156   }
157 
158   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
159     if (!AllowVolatile && RMWI->isVolatile())
160       return nullptr;
161     return RMWI->getPointerOperand();
162   }
163 
164   return nullptr;
165 }
166 
167 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
168 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
169 /// getelement pointer instructions that traverse the natural type of \p Ptr if
170 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
171 /// through a cast to i8*.
172 ///
173 /// TODO: This could probably live somewhere more prominantly if it doesn't
174 ///       already exist.
175 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
176                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
177   assert(Offset >= 0 && "Negative offset not supported yet!");
178   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
179                     << "-bytes as " << *ResTy << "\n");
180 
181   // The initial type we are trying to traverse to get nice GEPs.
182   Type *Ty = Ptr->getType();
183 
184   SmallVector<Value *, 4> Indices;
185   std::string GEPName = Ptr->getName().str();
186   while (Offset) {
187     uint64_t Idx, Rem;
188 
189     if (auto *STy = dyn_cast<StructType>(Ty)) {
190       const StructLayout *SL = DL.getStructLayout(STy);
191       if (int64_t(SL->getSizeInBytes()) < Offset)
192         break;
193       Idx = SL->getElementContainingOffset(Offset);
194       assert(Idx < STy->getNumElements() && "Offset calculation error!");
195       Rem = Offset - SL->getElementOffset(Idx);
196       Ty = STy->getElementType(Idx);
197     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
198       Ty = PTy->getElementType();
199       if (!Ty->isSized())
200         break;
201       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
202       assert(ElementSize && "Expected type with size!");
203       Idx = Offset / ElementSize;
204       Rem = Offset % ElementSize;
205     } else {
206       // Non-aggregate type, we cast and make byte-wise progress now.
207       break;
208     }
209 
210     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
211                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
212 
213     GEPName += "." + std::to_string(Idx);
214     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
215     Offset = Rem;
216   }
217 
218   // Create a GEP if we collected indices above.
219   if (Indices.size())
220     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
221 
222   // If an offset is left we use byte-wise adjustment.
223   if (Offset) {
224     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
225     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
226                         GEPName + ".b" + Twine(Offset));
227   }
228 
229   // Ensure the result has the requested type.
230   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
231 
232   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
233   return Ptr;
234 }
235 
236 /// Recursively visit all values that might become \p IRP at some point. This
237 /// will be done by looking through cast instructions, selects, phis, and calls
238 /// with the "returned" attribute. Once we cannot look through the value any
239 /// further, the callback \p VisitValueCB is invoked and passed the current
240 /// value, the \p State, and a flag to indicate if we stripped anything.
241 /// Stripped means that we unpacked the value associated with \p IRP at least
242 /// once. Note that the value used for the callback may still be the value
243 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
244 /// we will never visit more values than specified by \p MaxValues.
245 template <typename AAType, typename StateTy>
246 static bool genericValueTraversal(
247     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
248     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
249         VisitValueCB,
250     const Instruction *CtxI, int MaxValues = 16,
251     function_ref<Value *(Value *)> StripCB = nullptr) {
252 
253   const AAIsDead *LivenessAA = nullptr;
254   if (IRP.getAnchorScope())
255     LivenessAA = &A.getAAFor<AAIsDead>(
256         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
257         /* TrackDependence */ false);
258   bool AnyDead = false;
259 
260   using Item = std::pair<Value *, const Instruction *>;
261   SmallSet<Item, 16> Visited;
262   SmallVector<Item, 16> Worklist;
263   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
264 
265   int Iteration = 0;
266   do {
267     Item I = Worklist.pop_back_val();
268     Value *V = I.first;
269     CtxI = I.second;
270     if (StripCB)
271       V = StripCB(V);
272 
273     // Check if we should process the current value. To prevent endless
274     // recursion keep a record of the values we followed!
275     if (!Visited.insert(I).second)
276       continue;
277 
278     // Make sure we limit the compile time for complex expressions.
279     if (Iteration++ >= MaxValues)
280       return false;
281 
282     // Explicitly look through calls with a "returned" attribute if we do
283     // not have a pointer as stripPointerCasts only works on them.
284     Value *NewV = nullptr;
285     if (V->getType()->isPointerTy()) {
286       NewV = V->stripPointerCasts();
287     } else {
288       auto *CB = dyn_cast<CallBase>(V);
289       if (CB && CB->getCalledFunction()) {
290         for (Argument &Arg : CB->getCalledFunction()->args())
291           if (Arg.hasReturnedAttr()) {
292             NewV = CB->getArgOperand(Arg.getArgNo());
293             break;
294           }
295       }
296     }
297     if (NewV && NewV != V) {
298       Worklist.push_back({NewV, CtxI});
299       continue;
300     }
301 
302     // Look through select instructions, visit both potential values.
303     if (auto *SI = dyn_cast<SelectInst>(V)) {
304       Worklist.push_back({SI->getTrueValue(), CtxI});
305       Worklist.push_back({SI->getFalseValue(), CtxI});
306       continue;
307     }
308 
309     // Look through phi nodes, visit all live operands.
310     if (auto *PHI = dyn_cast<PHINode>(V)) {
311       assert(LivenessAA &&
312              "Expected liveness in the presence of instructions!");
313       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
314         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
315         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
316                             LivenessAA,
317                             /* CheckBBLivenessOnly */ true)) {
318           AnyDead = true;
319           continue;
320         }
321         Worklist.push_back(
322             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
323       }
324       continue;
325     }
326 
327     // Once a leaf is reached we inform the user through the callback.
328     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
329       return false;
330   } while (!Worklist.empty());
331 
332   // If we actually used liveness information so we have to record a dependence.
333   if (AnyDead)
334     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
335 
336   // All values have been visited.
337   return true;
338 }
339 
340 static const Value *
341 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
342                                      const DataLayout &DL,
343                                      bool AllowNonInbounds = false) {
344   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
345   if (!Ptr)
346     return nullptr;
347 
348   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
349                                           AllowNonInbounds);
350 }
351 
352 /// Helper function to clamp a state \p S of type \p StateType with the
353 /// information in \p R and indicate/return if \p S did change (as-in update is
354 /// required to be run again).
355 template <typename StateType>
356 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
357   auto Assumed = S.getAssumed();
358   S ^= R;
359   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
360                                    : ChangeStatus::CHANGED;
361 }
362 
363 /// Clamp the information known for all returned values of a function
364 /// (identified by \p QueryingAA) into \p S.
365 template <typename AAType, typename StateType = typename AAType::StateType>
366 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
367                                      StateType &S) {
368   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
369                     << QueryingAA << " into " << S << "\n");
370 
371   assert((QueryingAA.getIRPosition().getPositionKind() ==
372               IRPosition::IRP_RETURNED ||
373           QueryingAA.getIRPosition().getPositionKind() ==
374               IRPosition::IRP_CALL_SITE_RETURNED) &&
375          "Can only clamp returned value states for a function returned or call "
376          "site returned position!");
377 
378   // Use an optional state as there might not be any return values and we want
379   // to join (IntegerState::operator&) the state of all there are.
380   Optional<StateType> T;
381 
382   // Callback for each possibly returned value.
383   auto CheckReturnValue = [&](Value &RV) -> bool {
384     const IRPosition &RVPos = IRPosition::value(RV);
385     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
386     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
387                       << " @ " << RVPos << "\n");
388     const StateType &AAS = static_cast<const StateType &>(AA.getState());
389     if (T.hasValue())
390       *T &= AAS;
391     else
392       T = AAS;
393     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
394                       << "\n");
395     return T->isValidState();
396   };
397 
398   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
399     S.indicatePessimisticFixpoint();
400   else if (T.hasValue())
401     S ^= *T;
402 }
403 
404 /// Helper class for generic deduction: return value -> returned position.
405 template <typename AAType, typename BaseType,
406           typename StateType = typename BaseType::StateType>
407 struct AAReturnedFromReturnedValues : public BaseType {
408   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
409       : BaseType(IRP, A) {}
410 
411   /// See AbstractAttribute::updateImpl(...).
412   ChangeStatus updateImpl(Attributor &A) override {
413     StateType S(StateType::getBestState(this->getState()));
414     clampReturnedValueStates<AAType, StateType>(A, *this, S);
415     // TODO: If we know we visited all returned values, thus no are assumed
416     // dead, we can take the known information from the state T.
417     return clampStateAndIndicateChange<StateType>(this->getState(), S);
418   }
419 };
420 
421 /// Clamp the information known at all call sites for a given argument
422 /// (identified by \p QueryingAA) into \p S.
423 template <typename AAType, typename StateType = typename AAType::StateType>
424 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
425                                         StateType &S) {
426   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
427                     << QueryingAA << " into " << S << "\n");
428 
429   assert(QueryingAA.getIRPosition().getPositionKind() ==
430              IRPosition::IRP_ARGUMENT &&
431          "Can only clamp call site argument states for an argument position!");
432 
433   // Use an optional state as there might not be any return values and we want
434   // to join (IntegerState::operator&) the state of all there are.
435   Optional<StateType> T;
436 
437   // The argument number which is also the call site argument number.
438   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
439 
440   auto CallSiteCheck = [&](AbstractCallSite ACS) {
441     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
442     // Check if a coresponding argument was found or if it is on not associated
443     // (which can happen for callback calls).
444     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
445       return false;
446 
447     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
448     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
449                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
450     const StateType &AAS = static_cast<const StateType &>(AA.getState());
451     if (T.hasValue())
452       *T &= AAS;
453     else
454       T = AAS;
455     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
456                       << "\n");
457     return T->isValidState();
458   };
459 
460   bool AllCallSitesKnown;
461   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
462                               AllCallSitesKnown))
463     S.indicatePessimisticFixpoint();
464   else if (T.hasValue())
465     S ^= *T;
466 }
467 
468 /// Helper class for generic deduction: call site argument -> argument position.
469 template <typename AAType, typename BaseType,
470           typename StateType = typename AAType::StateType>
471 struct AAArgumentFromCallSiteArguments : public BaseType {
472   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
473       : BaseType(IRP, A) {}
474 
475   /// See AbstractAttribute::updateImpl(...).
476   ChangeStatus updateImpl(Attributor &A) override {
477     StateType S(StateType::getBestState(this->getState()));
478     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
479     // TODO: If we know we visited all incoming values, thus no are assumed
480     // dead, we can take the known information from the state T.
481     return clampStateAndIndicateChange<StateType>(this->getState(), S);
482   }
483 };
484 
485 /// Helper class for generic replication: function returned -> cs returned.
486 template <typename AAType, typename BaseType,
487           typename StateType = typename BaseType::StateType>
488 struct AACallSiteReturnedFromReturned : public BaseType {
489   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
490       : BaseType(IRP, A) {}
491 
492   /// See AbstractAttribute::updateImpl(...).
493   ChangeStatus updateImpl(Attributor &A) override {
494     assert(this->getIRPosition().getPositionKind() ==
495                IRPosition::IRP_CALL_SITE_RETURNED &&
496            "Can only wrap function returned positions for call site returned "
497            "positions!");
498     auto &S = this->getState();
499 
500     const Function *AssociatedFunction =
501         this->getIRPosition().getAssociatedFunction();
502     if (!AssociatedFunction)
503       return S.indicatePessimisticFixpoint();
504 
505     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
506     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
507     return clampStateAndIndicateChange(
508         S, static_cast<const StateType &>(AA.getState()));
509   }
510 };
511 
512 /// Helper function to accumulate uses.
513 template <class AAType, typename StateType = typename AAType::StateType>
514 static void followUsesInContext(AAType &AA, Attributor &A,
515                                 MustBeExecutedContextExplorer &Explorer,
516                                 const Instruction *CtxI,
517                                 SetVector<const Use *> &Uses,
518                                 StateType &State) {
519   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
520   for (unsigned u = 0; u < Uses.size(); ++u) {
521     const Use *U = Uses[u];
522     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
523       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
524       if (Found && AA.followUseInMBEC(A, U, UserI, State))
525         for (const Use &Us : UserI->uses())
526           Uses.insert(&Us);
527     }
528   }
529 }
530 
531 /// Use the must-be-executed-context around \p I to add information into \p S.
532 /// The AAType class is required to have `followUseInMBEC` method with the
533 /// following signature and behaviour:
534 ///
535 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
536 /// U - Underlying use.
537 /// I - The user of the \p U.
538 /// Returns true if the value should be tracked transitively.
539 ///
540 template <class AAType, typename StateType = typename AAType::StateType>
541 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
542                             Instruction &CtxI) {
543 
544   // Container for (transitive) uses of the associated value.
545   SetVector<const Use *> Uses;
546   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
547     Uses.insert(&U);
548 
549   MustBeExecutedContextExplorer &Explorer =
550       A.getInfoCache().getMustBeExecutedContextExplorer();
551 
552   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
553 
554   if (S.isAtFixpoint())
555     return;
556 
557   SmallVector<const BranchInst *, 4> BrInsts;
558   auto Pred = [&](const Instruction *I) {
559     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
560       if (Br->isConditional())
561         BrInsts.push_back(Br);
562     return true;
563   };
564 
565   // Here, accumulate conditional branch instructions in the context. We
566   // explore the child paths and collect the known states. The disjunction of
567   // those states can be merged to its own state. Let ParentState_i be a state
568   // to indicate the known information for an i-th branch instruction in the
569   // context. ChildStates are created for its successors respectively.
570   //
571   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
572   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
573   //      ...
574   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
575   //
576   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
577   //
578   // FIXME: Currently, recursive branches are not handled. For example, we
579   // can't deduce that ptr must be dereferenced in below function.
580   //
581   // void f(int a, int c, int *ptr) {
582   //    if(a)
583   //      if (b) {
584   //        *ptr = 0;
585   //      } else {
586   //        *ptr = 1;
587   //      }
588   //    else {
589   //      if (b) {
590   //        *ptr = 0;
591   //      } else {
592   //        *ptr = 1;
593   //      }
594   //    }
595   // }
596 
597   Explorer.checkForAllContext(&CtxI, Pred);
598   for (const BranchInst *Br : BrInsts) {
599     StateType ParentState;
600 
601     // The known state of the parent state is a conjunction of children's
602     // known states so it is initialized with a best state.
603     ParentState.indicateOptimisticFixpoint();
604 
605     for (const BasicBlock *BB : Br->successors()) {
606       StateType ChildState;
607 
608       size_t BeforeSize = Uses.size();
609       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
610 
611       // Erase uses which only appear in the child.
612       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
613         It = Uses.erase(It);
614 
615       ParentState &= ChildState;
616     }
617 
618     // Use only known state.
619     S += ParentState;
620   }
621 }
622 
623 /// -----------------------NoUnwind Function Attribute--------------------------
624 
625 struct AANoUnwindImpl : AANoUnwind {
626   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
627 
628   const std::string getAsStr() const override {
629     return getAssumed() ? "nounwind" : "may-unwind";
630   }
631 
632   /// See AbstractAttribute::updateImpl(...).
633   ChangeStatus updateImpl(Attributor &A) override {
634     auto Opcodes = {
635         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
636         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
637         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
638 
639     auto CheckForNoUnwind = [&](Instruction &I) {
640       if (!I.mayThrow())
641         return true;
642 
643       if (const auto *CB = dyn_cast<CallBase>(&I)) {
644         const auto &NoUnwindAA =
645             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
646         return NoUnwindAA.isAssumedNoUnwind();
647       }
648       return false;
649     };
650 
651     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
652       return indicatePessimisticFixpoint();
653 
654     return ChangeStatus::UNCHANGED;
655   }
656 };
657 
658 struct AANoUnwindFunction final : public AANoUnwindImpl {
659   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
660       : AANoUnwindImpl(IRP, A) {}
661 
662   /// See AbstractAttribute::trackStatistics()
663   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
664 };
665 
666 /// NoUnwind attribute deduction for a call sites.
667 struct AANoUnwindCallSite final : AANoUnwindImpl {
668   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
669       : AANoUnwindImpl(IRP, A) {}
670 
671   /// See AbstractAttribute::initialize(...).
672   void initialize(Attributor &A) override {
673     AANoUnwindImpl::initialize(A);
674     Function *F = getAssociatedFunction();
675     if (!F)
676       indicatePessimisticFixpoint();
677   }
678 
679   /// See AbstractAttribute::updateImpl(...).
680   ChangeStatus updateImpl(Attributor &A) override {
681     // TODO: Once we have call site specific value information we can provide
682     //       call site specific liveness information and then it makes
683     //       sense to specialize attributes for call sites arguments instead of
684     //       redirecting requests to the callee argument.
685     Function *F = getAssociatedFunction();
686     const IRPosition &FnPos = IRPosition::function(*F);
687     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
688     return clampStateAndIndicateChange(
689         getState(),
690         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
691   }
692 
693   /// See AbstractAttribute::trackStatistics()
694   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
695 };
696 
697 /// --------------------- Function Return Values -------------------------------
698 
699 /// "Attribute" that collects all potential returned values and the return
700 /// instructions that they arise from.
701 ///
702 /// If there is a unique returned value R, the manifest method will:
703 ///   - mark R with the "returned" attribute, if R is an argument.
704 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
705 
706   /// Mapping of values potentially returned by the associated function to the
707   /// return instructions that might return them.
708   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
709 
710   /// Mapping to remember the number of returned values for a call site such
711   /// that we can avoid updates if nothing changed.
712   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
713 
714   /// Set of unresolved calls returned by the associated function.
715   SmallSetVector<CallBase *, 4> UnresolvedCalls;
716 
717   /// State flags
718   ///
719   ///{
720   bool IsFixed = false;
721   bool IsValidState = true;
722   ///}
723 
724 public:
725   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
726       : AAReturnedValues(IRP, A) {}
727 
728   /// See AbstractAttribute::initialize(...).
729   void initialize(Attributor &A) override {
730     // Reset the state.
731     IsFixed = false;
732     IsValidState = true;
733     ReturnedValues.clear();
734 
735     Function *F = getAssociatedFunction();
736     if (!F) {
737       indicatePessimisticFixpoint();
738       return;
739     }
740     assert(!F->getReturnType()->isVoidTy() &&
741            "Did not expect a void return type!");
742 
743     // The map from instruction opcodes to those instructions in the function.
744     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
745 
746     // Look through all arguments, if one is marked as returned we are done.
747     for (Argument &Arg : F->args()) {
748       if (Arg.hasReturnedAttr()) {
749         auto &ReturnInstSet = ReturnedValues[&Arg];
750         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
751           for (Instruction *RI : *Insts)
752             ReturnInstSet.insert(cast<ReturnInst>(RI));
753 
754         indicateOptimisticFixpoint();
755         return;
756       }
757     }
758 
759     if (!A.isFunctionIPOAmendable(*F))
760       indicatePessimisticFixpoint();
761   }
762 
763   /// See AbstractAttribute::manifest(...).
764   ChangeStatus manifest(Attributor &A) override;
765 
766   /// See AbstractAttribute::getState(...).
767   AbstractState &getState() override { return *this; }
768 
769   /// See AbstractAttribute::getState(...).
770   const AbstractState &getState() const override { return *this; }
771 
772   /// See AbstractAttribute::updateImpl(Attributor &A).
773   ChangeStatus updateImpl(Attributor &A) override;
774 
775   llvm::iterator_range<iterator> returned_values() override {
776     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
777   }
778 
779   llvm::iterator_range<const_iterator> returned_values() const override {
780     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
781   }
782 
783   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
784     return UnresolvedCalls;
785   }
786 
787   /// Return the number of potential return values, -1 if unknown.
788   size_t getNumReturnValues() const override {
789     return isValidState() ? ReturnedValues.size() : -1;
790   }
791 
792   /// Return an assumed unique return value if a single candidate is found. If
793   /// there cannot be one, return a nullptr. If it is not clear yet, return the
794   /// Optional::NoneType.
795   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
796 
797   /// See AbstractState::checkForAllReturnedValues(...).
798   bool checkForAllReturnedValuesAndReturnInsts(
799       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
800       const override;
801 
802   /// Pretty print the attribute similar to the IR representation.
803   const std::string getAsStr() const override;
804 
805   /// See AbstractState::isAtFixpoint().
806   bool isAtFixpoint() const override { return IsFixed; }
807 
808   /// See AbstractState::isValidState().
809   bool isValidState() const override { return IsValidState; }
810 
811   /// See AbstractState::indicateOptimisticFixpoint(...).
812   ChangeStatus indicateOptimisticFixpoint() override {
813     IsFixed = true;
814     return ChangeStatus::UNCHANGED;
815   }
816 
817   ChangeStatus indicatePessimisticFixpoint() override {
818     IsFixed = true;
819     IsValidState = false;
820     return ChangeStatus::CHANGED;
821   }
822 };
823 
824 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
825   ChangeStatus Changed = ChangeStatus::UNCHANGED;
826 
827   // Bookkeeping.
828   assert(isValidState());
829   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
830                   "Number of function with known return values");
831 
832   // Check if we have an assumed unique return value that we could manifest.
833   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
834 
835   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
836     return Changed;
837 
838   // Bookkeeping.
839   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
840                   "Number of function with unique return");
841 
842   // Callback to replace the uses of CB with the constant C.
843   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
844     if (CB.use_empty())
845       return ChangeStatus::UNCHANGED;
846     if (A.changeValueAfterManifest(CB, C))
847       return ChangeStatus::CHANGED;
848     return ChangeStatus::UNCHANGED;
849   };
850 
851   // If the assumed unique return value is an argument, annotate it.
852   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
853     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
854             getAssociatedFunction()->getReturnType())) {
855       getIRPosition() = IRPosition::argument(*UniqueRVArg);
856       Changed = IRAttribute::manifest(A);
857     }
858   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
859     // We can replace the returned value with the unique returned constant.
860     Value &AnchorValue = getAnchorValue();
861     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
862       for (const Use &U : F->uses())
863         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
864           if (CB->isCallee(&U)) {
865             Constant *RVCCast =
866                 CB->getType() == RVC->getType()
867                     ? RVC
868                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
869             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
870           }
871     } else {
872       assert(isa<CallBase>(AnchorValue) &&
873              "Expcected a function or call base anchor!");
874       Constant *RVCCast =
875           AnchorValue.getType() == RVC->getType()
876               ? RVC
877               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
878       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
879     }
880     if (Changed == ChangeStatus::CHANGED)
881       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
882                       "Number of function returns replaced by constant return");
883   }
884 
885   return Changed;
886 }
887 
888 const std::string AAReturnedValuesImpl::getAsStr() const {
889   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
890          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
891          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
892 }
893 
894 Optional<Value *>
895 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
896   // If checkForAllReturnedValues provides a unique value, ignoring potential
897   // undef values that can also be present, it is assumed to be the actual
898   // return value and forwarded to the caller of this method. If there are
899   // multiple, a nullptr is returned indicating there cannot be a unique
900   // returned value.
901   Optional<Value *> UniqueRV;
902 
903   auto Pred = [&](Value &RV) -> bool {
904     // If we found a second returned value and neither the current nor the saved
905     // one is an undef, there is no unique returned value. Undefs are special
906     // since we can pretend they have any value.
907     if (UniqueRV.hasValue() && UniqueRV != &RV &&
908         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
909       UniqueRV = nullptr;
910       return false;
911     }
912 
913     // Do not overwrite a value with an undef.
914     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
915       UniqueRV = &RV;
916 
917     return true;
918   };
919 
920   if (!A.checkForAllReturnedValues(Pred, *this))
921     UniqueRV = nullptr;
922 
923   return UniqueRV;
924 }
925 
926 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
927     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
928     const {
929   if (!isValidState())
930     return false;
931 
932   // Check all returned values but ignore call sites as long as we have not
933   // encountered an overdefined one during an update.
934   for (auto &It : ReturnedValues) {
935     Value *RV = It.first;
936 
937     CallBase *CB = dyn_cast<CallBase>(RV);
938     if (CB && !UnresolvedCalls.count(CB))
939       continue;
940 
941     if (!Pred(*RV, It.second))
942       return false;
943   }
944 
945   return true;
946 }
947 
948 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
949   size_t NumUnresolvedCalls = UnresolvedCalls.size();
950   bool Changed = false;
951 
952   // State used in the value traversals starting in returned values.
953   struct RVState {
954     // The map in which we collect return values -> return instrs.
955     decltype(ReturnedValues) &RetValsMap;
956     // The flag to indicate a change.
957     bool &Changed;
958     // The return instrs we come from.
959     SmallSetVector<ReturnInst *, 4> RetInsts;
960   };
961 
962   // Callback for a leaf value returned by the associated function.
963   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
964                          bool) -> bool {
965     auto Size = RVS.RetValsMap[&Val].size();
966     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
967     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
968     RVS.Changed |= Inserted;
969     LLVM_DEBUG({
970       if (Inserted)
971         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
972                << " => " << RVS.RetInsts.size() << "\n";
973     });
974     return true;
975   };
976 
977   // Helper method to invoke the generic value traversal.
978   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
979                                 const Instruction *CtxI) {
980     IRPosition RetValPos = IRPosition::value(RV);
981     return genericValueTraversal<AAReturnedValues, RVState>(
982         A, RetValPos, *this, RVS, VisitValueCB, CtxI);
983   };
984 
985   // Callback for all "return intructions" live in the associated function.
986   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
987     ReturnInst &Ret = cast<ReturnInst>(I);
988     RVState RVS({ReturnedValues, Changed, {}});
989     RVS.RetInsts.insert(&Ret);
990     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
991   };
992 
993   // Start by discovering returned values from all live returned instructions in
994   // the associated function.
995   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
996     return indicatePessimisticFixpoint();
997 
998   // Once returned values "directly" present in the code are handled we try to
999   // resolve returned calls.
1000   decltype(ReturnedValues) NewRVsMap;
1001   for (auto &It : ReturnedValues) {
1002     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1003                       << " by #" << It.second.size() << " RIs\n");
1004     CallBase *CB = dyn_cast<CallBase>(It.first);
1005     if (!CB || UnresolvedCalls.count(CB))
1006       continue;
1007 
1008     if (!CB->getCalledFunction()) {
1009       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1010                         << "\n");
1011       UnresolvedCalls.insert(CB);
1012       continue;
1013     }
1014 
1015     // TODO: use the function scope once we have call site AAReturnedValues.
1016     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1017         *this, IRPosition::function(*CB->getCalledFunction()));
1018     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1019                       << RetValAA << "\n");
1020 
1021     // Skip dead ends, thus if we do not know anything about the returned
1022     // call we mark it as unresolved and it will stay that way.
1023     if (!RetValAA.getState().isValidState()) {
1024       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1025                         << "\n");
1026       UnresolvedCalls.insert(CB);
1027       continue;
1028     }
1029 
1030     // Do not try to learn partial information. If the callee has unresolved
1031     // return values we will treat the call as unresolved/opaque.
1032     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1033     if (!RetValAAUnresolvedCalls.empty()) {
1034       UnresolvedCalls.insert(CB);
1035       continue;
1036     }
1037 
1038     // Now check if we can track transitively returned values. If possible, thus
1039     // if all return value can be represented in the current scope, do so.
1040     bool Unresolved = false;
1041     for (auto &RetValAAIt : RetValAA.returned_values()) {
1042       Value *RetVal = RetValAAIt.first;
1043       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1044           isa<Constant>(RetVal))
1045         continue;
1046       // Anything that did not fit in the above categories cannot be resolved,
1047       // mark the call as unresolved.
1048       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1049                            "cannot be translated: "
1050                         << *RetVal << "\n");
1051       UnresolvedCalls.insert(CB);
1052       Unresolved = true;
1053       break;
1054     }
1055 
1056     if (Unresolved)
1057       continue;
1058 
1059     // Now track transitively returned values.
1060     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1061     if (NumRetAA == RetValAA.getNumReturnValues()) {
1062       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1063                            "changed since it was seen last\n");
1064       continue;
1065     }
1066     NumRetAA = RetValAA.getNumReturnValues();
1067 
1068     for (auto &RetValAAIt : RetValAA.returned_values()) {
1069       Value *RetVal = RetValAAIt.first;
1070       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1071         // Arguments are mapped to call site operands and we begin the traversal
1072         // again.
1073         bool Unused = false;
1074         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1075         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1076         continue;
1077       } else if (isa<CallBase>(RetVal)) {
1078         // Call sites are resolved by the callee attribute over time, no need to
1079         // do anything for us.
1080         continue;
1081       } else if (isa<Constant>(RetVal)) {
1082         // Constants are valid everywhere, we can simply take them.
1083         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1084         continue;
1085       }
1086     }
1087   }
1088 
1089   // To avoid modifications to the ReturnedValues map while we iterate over it
1090   // we kept record of potential new entries in a copy map, NewRVsMap.
1091   for (auto &It : NewRVsMap) {
1092     assert(!It.second.empty() && "Entry does not add anything.");
1093     auto &ReturnInsts = ReturnedValues[It.first];
1094     for (ReturnInst *RI : It.second)
1095       if (ReturnInsts.insert(RI)) {
1096         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1097                           << *It.first << " => " << *RI << "\n");
1098         Changed = true;
1099       }
1100   }
1101 
1102   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1103   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1104 }
1105 
1106 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1107   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1108       : AAReturnedValuesImpl(IRP, A) {}
1109 
1110   /// See AbstractAttribute::trackStatistics()
1111   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1112 };
1113 
1114 /// Returned values information for a call sites.
1115 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1116   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1117       : AAReturnedValuesImpl(IRP, A) {}
1118 
1119   /// See AbstractAttribute::initialize(...).
1120   void initialize(Attributor &A) override {
1121     // TODO: Once we have call site specific value information we can provide
1122     //       call site specific liveness information and then it makes
1123     //       sense to specialize attributes for call sites instead of
1124     //       redirecting requests to the callee.
1125     llvm_unreachable("Abstract attributes for returned values are not "
1126                      "supported for call sites yet!");
1127   }
1128 
1129   /// See AbstractAttribute::updateImpl(...).
1130   ChangeStatus updateImpl(Attributor &A) override {
1131     return indicatePessimisticFixpoint();
1132   }
1133 
1134   /// See AbstractAttribute::trackStatistics()
1135   void trackStatistics() const override {}
1136 };
1137 
1138 /// ------------------------ NoSync Function Attribute -------------------------
1139 
1140 struct AANoSyncImpl : AANoSync {
1141   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1142 
1143   const std::string getAsStr() const override {
1144     return getAssumed() ? "nosync" : "may-sync";
1145   }
1146 
1147   /// See AbstractAttribute::updateImpl(...).
1148   ChangeStatus updateImpl(Attributor &A) override;
1149 
1150   /// Helper function used to determine whether an instruction is non-relaxed
1151   /// atomic. In other words, if an atomic instruction does not have unordered
1152   /// or monotonic ordering
1153   static bool isNonRelaxedAtomic(Instruction *I);
1154 
1155   /// Helper function used to determine whether an instruction is volatile.
1156   static bool isVolatile(Instruction *I);
1157 
1158   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1159   /// memset).
1160   static bool isNoSyncIntrinsic(Instruction *I);
1161 };
1162 
1163 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1164   if (!I->isAtomic())
1165     return false;
1166 
1167   AtomicOrdering Ordering;
1168   switch (I->getOpcode()) {
1169   case Instruction::AtomicRMW:
1170     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1171     break;
1172   case Instruction::Store:
1173     Ordering = cast<StoreInst>(I)->getOrdering();
1174     break;
1175   case Instruction::Load:
1176     Ordering = cast<LoadInst>(I)->getOrdering();
1177     break;
1178   case Instruction::Fence: {
1179     auto *FI = cast<FenceInst>(I);
1180     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1181       return false;
1182     Ordering = FI->getOrdering();
1183     break;
1184   }
1185   case Instruction::AtomicCmpXchg: {
1186     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1187     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1188     // Only if both are relaxed, than it can be treated as relaxed.
1189     // Otherwise it is non-relaxed.
1190     if (Success != AtomicOrdering::Unordered &&
1191         Success != AtomicOrdering::Monotonic)
1192       return true;
1193     if (Failure != AtomicOrdering::Unordered &&
1194         Failure != AtomicOrdering::Monotonic)
1195       return true;
1196     return false;
1197   }
1198   default:
1199     llvm_unreachable(
1200         "New atomic operations need to be known in the attributor.");
1201   }
1202 
1203   // Relaxed.
1204   if (Ordering == AtomicOrdering::Unordered ||
1205       Ordering == AtomicOrdering::Monotonic)
1206     return false;
1207   return true;
1208 }
1209 
1210 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1211 /// FIXME: We should ipmrove the handling of intrinsics.
1212 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1213   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1214     switch (II->getIntrinsicID()) {
1215     /// Element wise atomic memory intrinsics are can only be unordered,
1216     /// therefore nosync.
1217     case Intrinsic::memset_element_unordered_atomic:
1218     case Intrinsic::memmove_element_unordered_atomic:
1219     case Intrinsic::memcpy_element_unordered_atomic:
1220       return true;
1221     case Intrinsic::memset:
1222     case Intrinsic::memmove:
1223     case Intrinsic::memcpy:
1224       if (!cast<MemIntrinsic>(II)->isVolatile())
1225         return true;
1226       return false;
1227     default:
1228       return false;
1229     }
1230   }
1231   return false;
1232 }
1233 
1234 bool AANoSyncImpl::isVolatile(Instruction *I) {
1235   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1236 
1237   switch (I->getOpcode()) {
1238   case Instruction::AtomicRMW:
1239     return cast<AtomicRMWInst>(I)->isVolatile();
1240   case Instruction::Store:
1241     return cast<StoreInst>(I)->isVolatile();
1242   case Instruction::Load:
1243     return cast<LoadInst>(I)->isVolatile();
1244   case Instruction::AtomicCmpXchg:
1245     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1246   default:
1247     return false;
1248   }
1249 }
1250 
1251 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1252 
1253   auto CheckRWInstForNoSync = [&](Instruction &I) {
1254     /// We are looking for volatile instructions or Non-Relaxed atomics.
1255     /// FIXME: We should improve the handling of intrinsics.
1256 
1257     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1258       return true;
1259 
1260     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1261       if (CB->hasFnAttr(Attribute::NoSync))
1262         return true;
1263 
1264       const auto &NoSyncAA =
1265           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
1266       if (NoSyncAA.isAssumedNoSync())
1267         return true;
1268       return false;
1269     }
1270 
1271     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1272       return true;
1273 
1274     return false;
1275   };
1276 
1277   auto CheckForNoSync = [&](Instruction &I) {
1278     // At this point we handled all read/write effects and they are all
1279     // nosync, so they can be skipped.
1280     if (I.mayReadOrWriteMemory())
1281       return true;
1282 
1283     // non-convergent and readnone imply nosync.
1284     return !cast<CallBase>(I).isConvergent();
1285   };
1286 
1287   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1288       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1289     return indicatePessimisticFixpoint();
1290 
1291   return ChangeStatus::UNCHANGED;
1292 }
1293 
1294 struct AANoSyncFunction final : public AANoSyncImpl {
1295   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1296       : AANoSyncImpl(IRP, A) {}
1297 
1298   /// See AbstractAttribute::trackStatistics()
1299   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1300 };
1301 
1302 /// NoSync attribute deduction for a call sites.
1303 struct AANoSyncCallSite final : AANoSyncImpl {
1304   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1305       : AANoSyncImpl(IRP, A) {}
1306 
1307   /// See AbstractAttribute::initialize(...).
1308   void initialize(Attributor &A) override {
1309     AANoSyncImpl::initialize(A);
1310     Function *F = getAssociatedFunction();
1311     if (!F)
1312       indicatePessimisticFixpoint();
1313   }
1314 
1315   /// See AbstractAttribute::updateImpl(...).
1316   ChangeStatus updateImpl(Attributor &A) override {
1317     // TODO: Once we have call site specific value information we can provide
1318     //       call site specific liveness information and then it makes
1319     //       sense to specialize attributes for call sites arguments instead of
1320     //       redirecting requests to the callee argument.
1321     Function *F = getAssociatedFunction();
1322     const IRPosition &FnPos = IRPosition::function(*F);
1323     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1324     return clampStateAndIndicateChange(
1325         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1326   }
1327 
1328   /// See AbstractAttribute::trackStatistics()
1329   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1330 };
1331 
1332 /// ------------------------ No-Free Attributes ----------------------------
1333 
1334 struct AANoFreeImpl : public AANoFree {
1335   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1336 
1337   /// See AbstractAttribute::updateImpl(...).
1338   ChangeStatus updateImpl(Attributor &A) override {
1339     auto CheckForNoFree = [&](Instruction &I) {
1340       const auto &CB = cast<CallBase>(I);
1341       if (CB.hasFnAttr(Attribute::NoFree))
1342         return true;
1343 
1344       const auto &NoFreeAA =
1345           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1346       return NoFreeAA.isAssumedNoFree();
1347     };
1348 
1349     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1350       return indicatePessimisticFixpoint();
1351     return ChangeStatus::UNCHANGED;
1352   }
1353 
1354   /// See AbstractAttribute::getAsStr().
1355   const std::string getAsStr() const override {
1356     return getAssumed() ? "nofree" : "may-free";
1357   }
1358 };
1359 
1360 struct AANoFreeFunction final : public AANoFreeImpl {
1361   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1362       : AANoFreeImpl(IRP, A) {}
1363 
1364   /// See AbstractAttribute::trackStatistics()
1365   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1366 };
1367 
1368 /// NoFree attribute deduction for a call sites.
1369 struct AANoFreeCallSite final : AANoFreeImpl {
1370   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1371       : AANoFreeImpl(IRP, A) {}
1372 
1373   /// See AbstractAttribute::initialize(...).
1374   void initialize(Attributor &A) override {
1375     AANoFreeImpl::initialize(A);
1376     Function *F = getAssociatedFunction();
1377     if (!F)
1378       indicatePessimisticFixpoint();
1379   }
1380 
1381   /// See AbstractAttribute::updateImpl(...).
1382   ChangeStatus updateImpl(Attributor &A) override {
1383     // TODO: Once we have call site specific value information we can provide
1384     //       call site specific liveness information and then it makes
1385     //       sense to specialize attributes for call sites arguments instead of
1386     //       redirecting requests to the callee argument.
1387     Function *F = getAssociatedFunction();
1388     const IRPosition &FnPos = IRPosition::function(*F);
1389     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1390     return clampStateAndIndicateChange(
1391         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1392   }
1393 
1394   /// See AbstractAttribute::trackStatistics()
1395   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1396 };
1397 
1398 /// NoFree attribute for floating values.
1399 struct AANoFreeFloating : AANoFreeImpl {
1400   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1401       : AANoFreeImpl(IRP, A) {}
1402 
1403   /// See AbstractAttribute::trackStatistics()
1404   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1405 
1406   /// See Abstract Attribute::updateImpl(...).
1407   ChangeStatus updateImpl(Attributor &A) override {
1408     const IRPosition &IRP = getIRPosition();
1409 
1410     const auto &NoFreeAA =
1411         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1412     if (NoFreeAA.isAssumedNoFree())
1413       return ChangeStatus::UNCHANGED;
1414 
1415     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1416     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1417       Instruction *UserI = cast<Instruction>(U.getUser());
1418       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1419         if (CB->isBundleOperand(&U))
1420           return false;
1421         if (!CB->isArgOperand(&U))
1422           return true;
1423         unsigned ArgNo = CB->getArgOperandNo(&U);
1424 
1425         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1426             *this, IRPosition::callsite_argument(*CB, ArgNo));
1427         return NoFreeArg.isAssumedNoFree();
1428       }
1429 
1430       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1431           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1432         Follow = true;
1433         return true;
1434       }
1435       if (isa<ReturnInst>(UserI))
1436         return true;
1437 
1438       // Unknown user.
1439       return false;
1440     };
1441     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1442       return indicatePessimisticFixpoint();
1443 
1444     return ChangeStatus::UNCHANGED;
1445   }
1446 };
1447 
1448 /// NoFree attribute for a call site argument.
1449 struct AANoFreeArgument final : AANoFreeFloating {
1450   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1451       : AANoFreeFloating(IRP, A) {}
1452 
1453   /// See AbstractAttribute::trackStatistics()
1454   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1455 };
1456 
1457 /// NoFree attribute for call site arguments.
1458 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1459   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1460       : AANoFreeFloating(IRP, A) {}
1461 
1462   /// See AbstractAttribute::updateImpl(...).
1463   ChangeStatus updateImpl(Attributor &A) override {
1464     // TODO: Once we have call site specific value information we can provide
1465     //       call site specific liveness information and then it makes
1466     //       sense to specialize attributes for call sites arguments instead of
1467     //       redirecting requests to the callee argument.
1468     Argument *Arg = getAssociatedArgument();
1469     if (!Arg)
1470       return indicatePessimisticFixpoint();
1471     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1472     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1473     return clampStateAndIndicateChange(
1474         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1475   }
1476 
1477   /// See AbstractAttribute::trackStatistics()
1478   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1479 };
1480 
1481 /// NoFree attribute for function return value.
1482 struct AANoFreeReturned final : AANoFreeFloating {
1483   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1484       : AANoFreeFloating(IRP, A) {
1485     llvm_unreachable("NoFree is not applicable to function returns!");
1486   }
1487 
1488   /// See AbstractAttribute::initialize(...).
1489   void initialize(Attributor &A) override {
1490     llvm_unreachable("NoFree is not applicable to function returns!");
1491   }
1492 
1493   /// See AbstractAttribute::updateImpl(...).
1494   ChangeStatus updateImpl(Attributor &A) override {
1495     llvm_unreachable("NoFree is not applicable to function returns!");
1496   }
1497 
1498   /// See AbstractAttribute::trackStatistics()
1499   void trackStatistics() const override {}
1500 };
1501 
1502 /// NoFree attribute deduction for a call site return value.
1503 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1504   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1505       : AANoFreeFloating(IRP, A) {}
1506 
1507   ChangeStatus manifest(Attributor &A) override {
1508     return ChangeStatus::UNCHANGED;
1509   }
1510   /// See AbstractAttribute::trackStatistics()
1511   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1512 };
1513 
1514 /// ------------------------ NonNull Argument Attribute ------------------------
1515 static int64_t getKnownNonNullAndDerefBytesForUse(
1516     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1517     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1518   TrackUse = false;
1519 
1520   const Value *UseV = U->get();
1521   if (!UseV->getType()->isPointerTy())
1522     return 0;
1523 
1524   Type *PtrTy = UseV->getType();
1525   const Function *F = I->getFunction();
1526   bool NullPointerIsDefined =
1527       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1528   const DataLayout &DL = A.getInfoCache().getDL();
1529   if (const auto *CB = dyn_cast<CallBase>(I)) {
1530     if (CB->isBundleOperand(U)) {
1531       if (RetainedKnowledge RK = getKnowledgeFromUse(
1532               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1533         IsNonNull |=
1534             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1535         return RK.ArgValue;
1536       }
1537       return 0;
1538     }
1539 
1540     if (CB->isCallee(U)) {
1541       IsNonNull |= !NullPointerIsDefined;
1542       return 0;
1543     }
1544 
1545     unsigned ArgNo = CB->getArgOperandNo(U);
1546     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1547     // As long as we only use known information there is no need to track
1548     // dependences here.
1549     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1550                                                   /* TrackDependence */ false);
1551     IsNonNull |= DerefAA.isKnownNonNull();
1552     return DerefAA.getKnownDereferenceableBytes();
1553   }
1554 
1555   // We need to follow common pointer manipulation uses to the accesses they
1556   // feed into. We can try to be smart to avoid looking through things we do not
1557   // like for now, e.g., non-inbounds GEPs.
1558   if (isa<CastInst>(I)) {
1559     TrackUse = true;
1560     return 0;
1561   }
1562   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1563     if (GEP->hasAllConstantIndices()) {
1564       TrackUse = true;
1565       return 0;
1566     }
1567 
1568   int64_t Offset;
1569   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1570     if (Base == &AssociatedValue &&
1571         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1572       int64_t DerefBytes =
1573           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1574 
1575       IsNonNull |= !NullPointerIsDefined;
1576       return std::max(int64_t(0), DerefBytes);
1577     }
1578   }
1579 
1580   /// Corner case when an offset is 0.
1581   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1582           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1583     if (Offset == 0 && Base == &AssociatedValue &&
1584         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1585       int64_t DerefBytes =
1586           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1587       IsNonNull |= !NullPointerIsDefined;
1588       return std::max(int64_t(0), DerefBytes);
1589     }
1590   }
1591 
1592   return 0;
1593 }
1594 
1595 struct AANonNullImpl : AANonNull {
1596   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1597       : AANonNull(IRP, A),
1598         NullIsDefined(NullPointerIsDefined(
1599             getAnchorScope(),
1600             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1601 
1602   /// See AbstractAttribute::initialize(...).
1603   void initialize(Attributor &A) override {
1604     if (!NullIsDefined &&
1605         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1606                 /* IgnoreSubsumingPositions */ false, &A))
1607       indicateOptimisticFixpoint();
1608     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1609       indicatePessimisticFixpoint();
1610     else
1611       AANonNull::initialize(A);
1612 
1613     if (!getState().isAtFixpoint())
1614       if (Instruction *CtxI = getCtxI())
1615         followUsesInMBEC(*this, A, getState(), *CtxI);
1616   }
1617 
1618   /// See followUsesInMBEC
1619   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1620                        AANonNull::StateType &State) {
1621     bool IsNonNull = false;
1622     bool TrackUse = false;
1623     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1624                                        IsNonNull, TrackUse);
1625     State.setKnown(IsNonNull);
1626     return TrackUse;
1627   }
1628 
1629   /// See AbstractAttribute::getAsStr().
1630   const std::string getAsStr() const override {
1631     return getAssumed() ? "nonnull" : "may-null";
1632   }
1633 
1634   /// Flag to determine if the underlying value can be null and still allow
1635   /// valid accesses.
1636   const bool NullIsDefined;
1637 };
1638 
1639 /// NonNull attribute for a floating value.
1640 struct AANonNullFloating : public AANonNullImpl {
1641   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1642       : AANonNullImpl(IRP, A) {}
1643 
1644   /// See AbstractAttribute::updateImpl(...).
1645   ChangeStatus updateImpl(Attributor &A) override {
1646     if (!NullIsDefined) {
1647       const auto &DerefAA =
1648           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1649       if (DerefAA.getAssumedDereferenceableBytes())
1650         return ChangeStatus::UNCHANGED;
1651     }
1652 
1653     const DataLayout &DL = A.getDataLayout();
1654 
1655     DominatorTree *DT = nullptr;
1656     AssumptionCache *AC = nullptr;
1657     InformationCache &InfoCache = A.getInfoCache();
1658     if (const Function *Fn = getAnchorScope()) {
1659       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1660       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1661     }
1662 
1663     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1664                             AANonNull::StateType &T, bool Stripped) -> bool {
1665       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1666       if (!Stripped && this == &AA) {
1667         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1668           T.indicatePessimisticFixpoint();
1669       } else {
1670         // Use abstract attribute information.
1671         const AANonNull::StateType &NS =
1672             static_cast<const AANonNull::StateType &>(AA.getState());
1673         T ^= NS;
1674       }
1675       return T.isValidState();
1676     };
1677 
1678     StateType T;
1679     if (!genericValueTraversal<AANonNull, StateType>(
1680             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1681       return indicatePessimisticFixpoint();
1682 
1683     return clampStateAndIndicateChange(getState(), T);
1684   }
1685 
1686   /// See AbstractAttribute::trackStatistics()
1687   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1688 };
1689 
1690 /// NonNull attribute for function return value.
1691 struct AANonNullReturned final
1692     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1693   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1694       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {}
1695 
1696   /// See AbstractAttribute::trackStatistics()
1697   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1698 };
1699 
1700 /// NonNull attribute for function argument.
1701 struct AANonNullArgument final
1702     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1703   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1704       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1705 
1706   /// See AbstractAttribute::trackStatistics()
1707   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1708 };
1709 
1710 struct AANonNullCallSiteArgument final : AANonNullFloating {
1711   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1712       : AANonNullFloating(IRP, A) {}
1713 
1714   /// See AbstractAttribute::trackStatistics()
1715   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1716 };
1717 
1718 /// NonNull attribute for a call site return position.
1719 struct AANonNullCallSiteReturned final
1720     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1721   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1722       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1723 
1724   /// See AbstractAttribute::trackStatistics()
1725   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1726 };
1727 
1728 /// ------------------------ No-Recurse Attributes ----------------------------
1729 
1730 struct AANoRecurseImpl : public AANoRecurse {
1731   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1732 
1733   /// See AbstractAttribute::getAsStr()
1734   const std::string getAsStr() const override {
1735     return getAssumed() ? "norecurse" : "may-recurse";
1736   }
1737 };
1738 
1739 struct AANoRecurseFunction final : AANoRecurseImpl {
1740   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1741       : AANoRecurseImpl(IRP, A) {}
1742 
1743   /// See AbstractAttribute::initialize(...).
1744   void initialize(Attributor &A) override {
1745     AANoRecurseImpl::initialize(A);
1746     if (const Function *F = getAnchorScope())
1747       if (A.getInfoCache().getSccSize(*F) != 1)
1748         indicatePessimisticFixpoint();
1749   }
1750 
1751   /// See AbstractAttribute::updateImpl(...).
1752   ChangeStatus updateImpl(Attributor &A) override {
1753 
1754     // If all live call sites are known to be no-recurse, we are as well.
1755     auto CallSitePred = [&](AbstractCallSite ACS) {
1756       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1757           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1758           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1759       return NoRecurseAA.isKnownNoRecurse();
1760     };
1761     bool AllCallSitesKnown;
1762     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1763       // If we know all call sites and all are known no-recurse, we are done.
1764       // If all known call sites, which might not be all that exist, are known
1765       // to be no-recurse, we are not done but we can continue to assume
1766       // no-recurse. If one of the call sites we have not visited will become
1767       // live, another update is triggered.
1768       if (AllCallSitesKnown)
1769         indicateOptimisticFixpoint();
1770       return ChangeStatus::UNCHANGED;
1771     }
1772 
1773     // If the above check does not hold anymore we look at the calls.
1774     auto CheckForNoRecurse = [&](Instruction &I) {
1775       const auto &CB = cast<CallBase>(I);
1776       if (CB.hasFnAttr(Attribute::NoRecurse))
1777         return true;
1778 
1779       const auto &NoRecurseAA =
1780           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1781       if (!NoRecurseAA.isAssumedNoRecurse())
1782         return false;
1783 
1784       // Recursion to the same function
1785       if (CB.getCalledFunction() == getAnchorScope())
1786         return false;
1787 
1788       return true;
1789     };
1790 
1791     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1792       return indicatePessimisticFixpoint();
1793     return ChangeStatus::UNCHANGED;
1794   }
1795 
1796   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1797 };
1798 
1799 /// NoRecurse attribute deduction for a call sites.
1800 struct AANoRecurseCallSite final : AANoRecurseImpl {
1801   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1802       : AANoRecurseImpl(IRP, A) {}
1803 
1804   /// See AbstractAttribute::initialize(...).
1805   void initialize(Attributor &A) override {
1806     AANoRecurseImpl::initialize(A);
1807     Function *F = getAssociatedFunction();
1808     if (!F)
1809       indicatePessimisticFixpoint();
1810   }
1811 
1812   /// See AbstractAttribute::updateImpl(...).
1813   ChangeStatus updateImpl(Attributor &A) override {
1814     // TODO: Once we have call site specific value information we can provide
1815     //       call site specific liveness information and then it makes
1816     //       sense to specialize attributes for call sites arguments instead of
1817     //       redirecting requests to the callee argument.
1818     Function *F = getAssociatedFunction();
1819     const IRPosition &FnPos = IRPosition::function(*F);
1820     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1821     return clampStateAndIndicateChange(
1822         getState(),
1823         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1824   }
1825 
1826   /// See AbstractAttribute::trackStatistics()
1827   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1828 };
1829 
1830 /// -------------------- Undefined-Behavior Attributes ------------------------
1831 
1832 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1833   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1834       : AAUndefinedBehavior(IRP, A) {}
1835 
1836   /// See AbstractAttribute::updateImpl(...).
1837   // through a pointer (i.e. also branches etc.)
1838   ChangeStatus updateImpl(Attributor &A) override {
1839     const size_t UBPrevSize = KnownUBInsts.size();
1840     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1841 
1842     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1843       // Skip instructions that are already saved.
1844       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1845         return true;
1846 
1847       // If we reach here, we know we have an instruction
1848       // that accesses memory through a pointer operand,
1849       // for which getPointerOperand() should give it to us.
1850       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1851       assert(PtrOp &&
1852              "Expected pointer operand of memory accessing instruction");
1853 
1854       // Either we stopped and the appropriate action was taken,
1855       // or we got back a simplified value to continue.
1856       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1857       if (!SimplifiedPtrOp.hasValue())
1858         return true;
1859       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1860 
1861       // A memory access through a pointer is considered UB
1862       // only if the pointer has constant null value.
1863       // TODO: Expand it to not only check constant values.
1864       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1865         AssumedNoUBInsts.insert(&I);
1866         return true;
1867       }
1868       const Type *PtrTy = PtrOpVal->getType();
1869 
1870       // Because we only consider instructions inside functions,
1871       // assume that a parent function exists.
1872       const Function *F = I.getFunction();
1873 
1874       // A memory access using constant null pointer is only considered UB
1875       // if null pointer is _not_ defined for the target platform.
1876       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1877         AssumedNoUBInsts.insert(&I);
1878       else
1879         KnownUBInsts.insert(&I);
1880       return true;
1881     };
1882 
1883     auto InspectBrInstForUB = [&](Instruction &I) {
1884       // A conditional branch instruction is considered UB if it has `undef`
1885       // condition.
1886 
1887       // Skip instructions that are already saved.
1888       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1889         return true;
1890 
1891       // We know we have a branch instruction.
1892       auto BrInst = cast<BranchInst>(&I);
1893 
1894       // Unconditional branches are never considered UB.
1895       if (BrInst->isUnconditional())
1896         return true;
1897 
1898       // Either we stopped and the appropriate action was taken,
1899       // or we got back a simplified value to continue.
1900       Optional<Value *> SimplifiedCond =
1901           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1902       if (!SimplifiedCond.hasValue())
1903         return true;
1904       AssumedNoUBInsts.insert(&I);
1905       return true;
1906     };
1907 
1908     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
1909                               {Instruction::Load, Instruction::Store,
1910                                Instruction::AtomicCmpXchg,
1911                                Instruction::AtomicRMW},
1912                               /* CheckBBLivenessOnly */ true);
1913     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
1914                               /* CheckBBLivenessOnly */ true);
1915     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
1916         UBPrevSize != KnownUBInsts.size())
1917       return ChangeStatus::CHANGED;
1918     return ChangeStatus::UNCHANGED;
1919   }
1920 
1921   bool isKnownToCauseUB(Instruction *I) const override {
1922     return KnownUBInsts.count(I);
1923   }
1924 
1925   bool isAssumedToCauseUB(Instruction *I) const override {
1926     // In simple words, if an instruction is not in the assumed to _not_
1927     // cause UB, then it is assumed UB (that includes those
1928     // in the KnownUBInsts set). The rest is boilerplate
1929     // is to ensure that it is one of the instructions we test
1930     // for UB.
1931 
1932     switch (I->getOpcode()) {
1933     case Instruction::Load:
1934     case Instruction::Store:
1935     case Instruction::AtomicCmpXchg:
1936     case Instruction::AtomicRMW:
1937       return !AssumedNoUBInsts.count(I);
1938     case Instruction::Br: {
1939       auto BrInst = cast<BranchInst>(I);
1940       if (BrInst->isUnconditional())
1941         return false;
1942       return !AssumedNoUBInsts.count(I);
1943     } break;
1944     default:
1945       return false;
1946     }
1947     return false;
1948   }
1949 
1950   ChangeStatus manifest(Attributor &A) override {
1951     if (KnownUBInsts.empty())
1952       return ChangeStatus::UNCHANGED;
1953     for (Instruction *I : KnownUBInsts)
1954       A.changeToUnreachableAfterManifest(I);
1955     return ChangeStatus::CHANGED;
1956   }
1957 
1958   /// See AbstractAttribute::getAsStr()
1959   const std::string getAsStr() const override {
1960     return getAssumed() ? "undefined-behavior" : "no-ub";
1961   }
1962 
1963   /// Note: The correctness of this analysis depends on the fact that the
1964   /// following 2 sets will stop changing after some point.
1965   /// "Change" here means that their size changes.
1966   /// The size of each set is monotonically increasing
1967   /// (we only add items to them) and it is upper bounded by the number of
1968   /// instructions in the processed function (we can never save more
1969   /// elements in either set than this number). Hence, at some point,
1970   /// they will stop increasing.
1971   /// Consequently, at some point, both sets will have stopped
1972   /// changing, effectively making the analysis reach a fixpoint.
1973 
1974   /// Note: These 2 sets are disjoint and an instruction can be considered
1975   /// one of 3 things:
1976   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
1977   ///    the KnownUBInsts set.
1978   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
1979   ///    has a reason to assume it).
1980   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
1981   ///    could not find a reason to assume or prove that it can cause UB,
1982   ///    hence it assumes it doesn't. We have a set for these instructions
1983   ///    so that we don't reprocess them in every update.
1984   ///    Note however that instructions in this set may cause UB.
1985 
1986 protected:
1987   /// A set of all live instructions _known_ to cause UB.
1988   SmallPtrSet<Instruction *, 8> KnownUBInsts;
1989 
1990 private:
1991   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
1992   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
1993 
1994   // Should be called on updates in which if we're processing an instruction
1995   // \p I that depends on a value \p V, one of the following has to happen:
1996   // - If the value is assumed, then stop.
1997   // - If the value is known but undef, then consider it UB.
1998   // - Otherwise, do specific processing with the simplified value.
1999   // We return None in the first 2 cases to signify that an appropriate
2000   // action was taken and the caller should stop.
2001   // Otherwise, we return the simplified value that the caller should
2002   // use for specific processing.
2003   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2004                                          Instruction *I) {
2005     const auto &ValueSimplifyAA =
2006         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2007     Optional<Value *> SimplifiedV =
2008         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2009     if (!ValueSimplifyAA.isKnown()) {
2010       // Don't depend on assumed values.
2011       return llvm::None;
2012     }
2013     if (!SimplifiedV.hasValue()) {
2014       // If it is known (which we tested above) but it doesn't have a value,
2015       // then we can assume `undef` and hence the instruction is UB.
2016       KnownUBInsts.insert(I);
2017       return llvm::None;
2018     }
2019     Value *Val = SimplifiedV.getValue();
2020     if (isa<UndefValue>(Val)) {
2021       KnownUBInsts.insert(I);
2022       return llvm::None;
2023     }
2024     return Val;
2025   }
2026 };
2027 
2028 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2029   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2030       : AAUndefinedBehaviorImpl(IRP, A) {}
2031 
2032   /// See AbstractAttribute::trackStatistics()
2033   void trackStatistics() const override {
2034     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2035                "Number of instructions known to have UB");
2036     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2037         KnownUBInsts.size();
2038   }
2039 };
2040 
2041 /// ------------------------ Will-Return Attributes ----------------------------
2042 
2043 // Helper function that checks whether a function has any cycle which we don't
2044 // know if it is bounded or not.
2045 // Loops with maximum trip count are considered bounded, any other cycle not.
2046 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2047   ScalarEvolution *SE =
2048       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2049   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2050   // If either SCEV or LoopInfo is not available for the function then we assume
2051   // any cycle to be unbounded cycle.
2052   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2053   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2054   if (!SE || !LI) {
2055     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2056       if (SCCI.hasCycle())
2057         return true;
2058     return false;
2059   }
2060 
2061   // If there's irreducible control, the function may contain non-loop cycles.
2062   if (mayContainIrreducibleControl(F, LI))
2063     return true;
2064 
2065   // Any loop that does not have a max trip count is considered unbounded cycle.
2066   for (auto *L : LI->getLoopsInPreorder()) {
2067     if (!SE->getSmallConstantMaxTripCount(L))
2068       return true;
2069   }
2070   return false;
2071 }
2072 
2073 struct AAWillReturnImpl : public AAWillReturn {
2074   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2075       : AAWillReturn(IRP, A) {}
2076 
2077   /// See AbstractAttribute::initialize(...).
2078   void initialize(Attributor &A) override {
2079     AAWillReturn::initialize(A);
2080 
2081     Function *F = getAnchorScope();
2082     if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
2083       indicatePessimisticFixpoint();
2084   }
2085 
2086   /// See AbstractAttribute::updateImpl(...).
2087   ChangeStatus updateImpl(Attributor &A) override {
2088     auto CheckForWillReturn = [&](Instruction &I) {
2089       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2090       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2091       if (WillReturnAA.isKnownWillReturn())
2092         return true;
2093       if (!WillReturnAA.isAssumedWillReturn())
2094         return false;
2095       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2096       return NoRecurseAA.isAssumedNoRecurse();
2097     };
2098 
2099     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2100       return indicatePessimisticFixpoint();
2101 
2102     return ChangeStatus::UNCHANGED;
2103   }
2104 
2105   /// See AbstractAttribute::getAsStr()
2106   const std::string getAsStr() const override {
2107     return getAssumed() ? "willreturn" : "may-noreturn";
2108   }
2109 };
2110 
2111 struct AAWillReturnFunction final : AAWillReturnImpl {
2112   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2113       : AAWillReturnImpl(IRP, A) {}
2114 
2115   /// See AbstractAttribute::trackStatistics()
2116   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2117 };
2118 
2119 /// WillReturn attribute deduction for a call sites.
2120 struct AAWillReturnCallSite final : AAWillReturnImpl {
2121   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2122       : AAWillReturnImpl(IRP, A) {}
2123 
2124   /// See AbstractAttribute::initialize(...).
2125   void initialize(Attributor &A) override {
2126     AAWillReturnImpl::initialize(A);
2127     Function *F = getAssociatedFunction();
2128     if (!F)
2129       indicatePessimisticFixpoint();
2130   }
2131 
2132   /// See AbstractAttribute::updateImpl(...).
2133   ChangeStatus updateImpl(Attributor &A) override {
2134     // TODO: Once we have call site specific value information we can provide
2135     //       call site specific liveness information and then it makes
2136     //       sense to specialize attributes for call sites arguments instead of
2137     //       redirecting requests to the callee argument.
2138     Function *F = getAssociatedFunction();
2139     const IRPosition &FnPos = IRPosition::function(*F);
2140     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2141     return clampStateAndIndicateChange(
2142         getState(),
2143         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2144   }
2145 
2146   /// See AbstractAttribute::trackStatistics()
2147   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2148 };
2149 
2150 /// -------------------AAReachability Attribute--------------------------
2151 
2152 struct AAReachabilityImpl : AAReachability {
2153   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2154       : AAReachability(IRP, A) {}
2155 
2156   const std::string getAsStr() const override {
2157     // TODO: Return the number of reachable queries.
2158     return "reachable";
2159   }
2160 
2161   /// See AbstractAttribute::initialize(...).
2162   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2163 
2164   /// See AbstractAttribute::updateImpl(...).
2165   ChangeStatus updateImpl(Attributor &A) override {
2166     return indicatePessimisticFixpoint();
2167   }
2168 };
2169 
2170 struct AAReachabilityFunction final : public AAReachabilityImpl {
2171   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2172       : AAReachabilityImpl(IRP, A) {}
2173 
2174   /// See AbstractAttribute::trackStatistics()
2175   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2176 };
2177 
2178 /// ------------------------ NoAlias Argument Attribute ------------------------
2179 
2180 struct AANoAliasImpl : AANoAlias {
2181   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2182     assert(getAssociatedType()->isPointerTy() &&
2183            "Noalias is a pointer attribute");
2184   }
2185 
2186   const std::string getAsStr() const override {
2187     return getAssumed() ? "noalias" : "may-alias";
2188   }
2189 };
2190 
2191 /// NoAlias attribute for a floating value.
2192 struct AANoAliasFloating final : AANoAliasImpl {
2193   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2194       : AANoAliasImpl(IRP, A) {}
2195 
2196   /// See AbstractAttribute::initialize(...).
2197   void initialize(Attributor &A) override {
2198     AANoAliasImpl::initialize(A);
2199     Value *Val = &getAssociatedValue();
2200     do {
2201       CastInst *CI = dyn_cast<CastInst>(Val);
2202       if (!CI)
2203         break;
2204       Value *Base = CI->getOperand(0);
2205       if (!Base->hasOneUse())
2206         break;
2207       Val = Base;
2208     } while (true);
2209 
2210     if (!Val->getType()->isPointerTy()) {
2211       indicatePessimisticFixpoint();
2212       return;
2213     }
2214 
2215     if (isa<AllocaInst>(Val))
2216       indicateOptimisticFixpoint();
2217     else if (isa<ConstantPointerNull>(Val) &&
2218              !NullPointerIsDefined(getAnchorScope(),
2219                                    Val->getType()->getPointerAddressSpace()))
2220       indicateOptimisticFixpoint();
2221     else if (Val != &getAssociatedValue()) {
2222       const auto &ValNoAliasAA =
2223           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2224       if (ValNoAliasAA.isKnownNoAlias())
2225         indicateOptimisticFixpoint();
2226     }
2227   }
2228 
2229   /// See AbstractAttribute::updateImpl(...).
2230   ChangeStatus updateImpl(Attributor &A) override {
2231     // TODO: Implement this.
2232     return indicatePessimisticFixpoint();
2233   }
2234 
2235   /// See AbstractAttribute::trackStatistics()
2236   void trackStatistics() const override {
2237     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2238   }
2239 };
2240 
2241 /// NoAlias attribute for an argument.
2242 struct AANoAliasArgument final
2243     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2244   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2245   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2246 
2247   /// See AbstractAttribute::initialize(...).
2248   void initialize(Attributor &A) override {
2249     Base::initialize(A);
2250     // See callsite argument attribute and callee argument attribute.
2251     if (hasAttr({Attribute::ByVal}))
2252       indicateOptimisticFixpoint();
2253   }
2254 
2255   /// See AbstractAttribute::update(...).
2256   ChangeStatus updateImpl(Attributor &A) override {
2257     // We have to make sure no-alias on the argument does not break
2258     // synchronization when this is a callback argument, see also [1] below.
2259     // If synchronization cannot be affected, we delegate to the base updateImpl
2260     // function, otherwise we give up for now.
2261 
2262     // If the function is no-sync, no-alias cannot break synchronization.
2263     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2264         *this, IRPosition::function_scope(getIRPosition()));
2265     if (NoSyncAA.isAssumedNoSync())
2266       return Base::updateImpl(A);
2267 
2268     // If the argument is read-only, no-alias cannot break synchronization.
2269     const auto &MemBehaviorAA =
2270         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2271     if (MemBehaviorAA.isAssumedReadOnly())
2272       return Base::updateImpl(A);
2273 
2274     // If the argument is never passed through callbacks, no-alias cannot break
2275     // synchronization.
2276     bool AllCallSitesKnown;
2277     if (A.checkForAllCallSites(
2278             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2279             true, AllCallSitesKnown))
2280       return Base::updateImpl(A);
2281 
2282     // TODO: add no-alias but make sure it doesn't break synchronization by
2283     // introducing fake uses. See:
2284     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2285     //     International Workshop on OpenMP 2018,
2286     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2287 
2288     return indicatePessimisticFixpoint();
2289   }
2290 
2291   /// See AbstractAttribute::trackStatistics()
2292   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2293 };
2294 
2295 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2296   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2297       : AANoAliasImpl(IRP, A) {}
2298 
2299   /// See AbstractAttribute::initialize(...).
2300   void initialize(Attributor &A) override {
2301     // See callsite argument attribute and callee argument attribute.
2302     const auto &CB = cast<CallBase>(getAnchorValue());
2303     if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias))
2304       indicateOptimisticFixpoint();
2305     Value &Val = getAssociatedValue();
2306     if (isa<ConstantPointerNull>(Val) &&
2307         !NullPointerIsDefined(getAnchorScope(),
2308                               Val.getType()->getPointerAddressSpace()))
2309       indicateOptimisticFixpoint();
2310   }
2311 
2312   /// Determine if the underlying value may alias with the call site argument
2313   /// \p OtherArgNo of \p ICS (= the underlying call site).
2314   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2315                             const AAMemoryBehavior &MemBehaviorAA,
2316                             const CallBase &CB, unsigned OtherArgNo) {
2317     // We do not need to worry about aliasing with the underlying IRP.
2318     if (this->getArgNo() == (int)OtherArgNo)
2319       return false;
2320 
2321     // If it is not a pointer or pointer vector we do not alias.
2322     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2323     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2324       return false;
2325 
2326     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2327         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2328         /* TrackDependence */ false);
2329 
2330     // If the argument is readnone, there is no read-write aliasing.
2331     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2332       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2333       return false;
2334     }
2335 
2336     // If the argument is readonly and the underlying value is readonly, there
2337     // is no read-write aliasing.
2338     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2339     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2340       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2341       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2342       return false;
2343     }
2344 
2345     // We have to utilize actual alias analysis queries so we need the object.
2346     if (!AAR)
2347       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2348 
2349     // Try to rule it out at the call site.
2350     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2351     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2352                          "callsite arguments: "
2353                       << getAssociatedValue() << " " << *ArgOp << " => "
2354                       << (IsAliasing ? "" : "no-") << "alias \n");
2355 
2356     return IsAliasing;
2357   }
2358 
2359   bool
2360   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2361                                          const AAMemoryBehavior &MemBehaviorAA,
2362                                          const AANoAlias &NoAliasAA) {
2363     // We can deduce "noalias" if the following conditions hold.
2364     // (i)   Associated value is assumed to be noalias in the definition.
2365     // (ii)  Associated value is assumed to be no-capture in all the uses
2366     //       possibly executed before this callsite.
2367     // (iii) There is no other pointer argument which could alias with the
2368     //       value.
2369 
2370     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2371     if (!AssociatedValueIsNoAliasAtDef) {
2372       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2373                         << " is not no-alias at the definition\n");
2374       return false;
2375     }
2376 
2377     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2378 
2379     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2380     auto &NoCaptureAA =
2381         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2382     // Check whether the value is captured in the scope using AANoCapture.
2383     //      Look at CFG and check only uses possibly executed before this
2384     //      callsite.
2385     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2386       Instruction *UserI = cast<Instruction>(U.getUser());
2387 
2388       // If user if curr instr and only use.
2389       if (UserI == getCtxI() && UserI->hasOneUse())
2390         return true;
2391 
2392       const Function *ScopeFn = VIRP.getAnchorScope();
2393       if (ScopeFn) {
2394         const auto &ReachabilityAA =
2395             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2396 
2397         if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI()))
2398           return true;
2399 
2400         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2401           if (CB->isArgOperand(&U)) {
2402 
2403             unsigned ArgNo = CB->getArgOperandNo(&U);
2404 
2405             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2406                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2407 
2408             if (NoCaptureAA.isAssumedNoCapture())
2409               return true;
2410           }
2411         }
2412       }
2413 
2414       // For cases which can potentially have more users
2415       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2416           isa<SelectInst>(U)) {
2417         Follow = true;
2418         return true;
2419       }
2420 
2421       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2422       return false;
2423     };
2424 
2425     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2426       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2427         LLVM_DEBUG(
2428             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2429                    << " cannot be noalias as it is potentially captured\n");
2430         return false;
2431       }
2432     }
2433     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2434 
2435     // Check there is no other pointer argument which could alias with the
2436     // value passed at this call site.
2437     // TODO: AbstractCallSite
2438     const auto &CB = cast<CallBase>(getAnchorValue());
2439     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2440          OtherArgNo++)
2441       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2442         return false;
2443 
2444     return true;
2445   }
2446 
2447   /// See AbstractAttribute::updateImpl(...).
2448   ChangeStatus updateImpl(Attributor &A) override {
2449     // If the argument is readnone we are done as there are no accesses via the
2450     // argument.
2451     auto &MemBehaviorAA =
2452         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2453                                      /* TrackDependence */ false);
2454     if (MemBehaviorAA.isAssumedReadNone()) {
2455       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2456       return ChangeStatus::UNCHANGED;
2457     }
2458 
2459     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2460     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2461                                                   /* TrackDependence */ false);
2462 
2463     AAResults *AAR = nullptr;
2464     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2465                                                NoAliasAA)) {
2466       LLVM_DEBUG(
2467           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2468       return ChangeStatus::UNCHANGED;
2469     }
2470 
2471     return indicatePessimisticFixpoint();
2472   }
2473 
2474   /// See AbstractAttribute::trackStatistics()
2475   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2476 };
2477 
2478 /// NoAlias attribute for function return value.
2479 struct AANoAliasReturned final : AANoAliasImpl {
2480   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2481       : AANoAliasImpl(IRP, A) {}
2482 
2483   /// See AbstractAttribute::updateImpl(...).
2484   virtual ChangeStatus updateImpl(Attributor &A) override {
2485 
2486     auto CheckReturnValue = [&](Value &RV) -> bool {
2487       if (Constant *C = dyn_cast<Constant>(&RV))
2488         if (C->isNullValue() || isa<UndefValue>(C))
2489           return true;
2490 
2491       /// For now, we can only deduce noalias if we have call sites.
2492       /// FIXME: add more support.
2493       if (!isa<CallBase>(&RV))
2494         return false;
2495 
2496       const IRPosition &RVPos = IRPosition::value(RV);
2497       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2498       if (!NoAliasAA.isAssumedNoAlias())
2499         return false;
2500 
2501       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2502       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2503     };
2504 
2505     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2506       return indicatePessimisticFixpoint();
2507 
2508     return ChangeStatus::UNCHANGED;
2509   }
2510 
2511   /// See AbstractAttribute::trackStatistics()
2512   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2513 };
2514 
2515 /// NoAlias attribute deduction for a call site return value.
2516 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2517   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2518       : AANoAliasImpl(IRP, A) {}
2519 
2520   /// See AbstractAttribute::initialize(...).
2521   void initialize(Attributor &A) override {
2522     AANoAliasImpl::initialize(A);
2523     Function *F = getAssociatedFunction();
2524     if (!F)
2525       indicatePessimisticFixpoint();
2526   }
2527 
2528   /// See AbstractAttribute::updateImpl(...).
2529   ChangeStatus updateImpl(Attributor &A) override {
2530     // TODO: Once we have call site specific value information we can provide
2531     //       call site specific liveness information and then it makes
2532     //       sense to specialize attributes for call sites arguments instead of
2533     //       redirecting requests to the callee argument.
2534     Function *F = getAssociatedFunction();
2535     const IRPosition &FnPos = IRPosition::returned(*F);
2536     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2537     return clampStateAndIndicateChange(
2538         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2539   }
2540 
2541   /// See AbstractAttribute::trackStatistics()
2542   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2543 };
2544 
2545 /// -------------------AAIsDead Function Attribute-----------------------
2546 
2547 struct AAIsDeadValueImpl : public AAIsDead {
2548   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2549 
2550   /// See AAIsDead::isAssumedDead().
2551   bool isAssumedDead() const override { return getAssumed(); }
2552 
2553   /// See AAIsDead::isKnownDead().
2554   bool isKnownDead() const override { return getKnown(); }
2555 
2556   /// See AAIsDead::isAssumedDead(BasicBlock *).
2557   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2558 
2559   /// See AAIsDead::isKnownDead(BasicBlock *).
2560   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2561 
2562   /// See AAIsDead::isAssumedDead(Instruction *I).
2563   bool isAssumedDead(const Instruction *I) const override {
2564     return I == getCtxI() && isAssumedDead();
2565   }
2566 
2567   /// See AAIsDead::isKnownDead(Instruction *I).
2568   bool isKnownDead(const Instruction *I) const override {
2569     return isAssumedDead(I) && getKnown();
2570   }
2571 
2572   /// See AbstractAttribute::getAsStr().
2573   const std::string getAsStr() const override {
2574     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2575   }
2576 
2577   /// Check if all uses are assumed dead.
2578   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2579     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2580     // Explicitly set the dependence class to required because we want a long
2581     // chain of N dependent instructions to be considered live as soon as one is
2582     // without going through N update cycles. This is not required for
2583     // correctness.
2584     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2585   }
2586 
2587   /// Determine if \p I is assumed to be side-effect free.
2588   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2589     if (!I || wouldInstructionBeTriviallyDead(I))
2590       return true;
2591 
2592     auto *CB = dyn_cast<CallBase>(I);
2593     if (!CB || isa<IntrinsicInst>(CB))
2594       return false;
2595 
2596     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2597     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, CallIRP);
2598     if (!NoUnwindAA.isAssumedNoUnwind())
2599       return false;
2600 
2601     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2602         *this, CallIRP, /* TrackDependence */ false);
2603     if (MemBehaviorAA.isAssumedReadOnly()) {
2604       if (!MemBehaviorAA.isKnownReadOnly())
2605         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2606       return true;
2607     }
2608     return false;
2609   }
2610 };
2611 
2612 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2613   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2614       : AAIsDeadValueImpl(IRP, A) {}
2615 
2616   /// See AbstractAttribute::initialize(...).
2617   void initialize(Attributor &A) override {
2618     if (isa<UndefValue>(getAssociatedValue())) {
2619       indicatePessimisticFixpoint();
2620       return;
2621     }
2622 
2623     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2624     if (!isAssumedSideEffectFree(A, I))
2625       indicatePessimisticFixpoint();
2626   }
2627 
2628   /// See AbstractAttribute::updateImpl(...).
2629   ChangeStatus updateImpl(Attributor &A) override {
2630     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2631     if (!isAssumedSideEffectFree(A, I))
2632       return indicatePessimisticFixpoint();
2633 
2634     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2635       return indicatePessimisticFixpoint();
2636     return ChangeStatus::UNCHANGED;
2637   }
2638 
2639   /// See AbstractAttribute::manifest(...).
2640   ChangeStatus manifest(Attributor &A) override {
2641     Value &V = getAssociatedValue();
2642     if (auto *I = dyn_cast<Instruction>(&V)) {
2643       // If we get here we basically know the users are all dead. We check if
2644       // isAssumedSideEffectFree returns true here again because it might not be
2645       // the case and only the users are dead but the instruction (=call) is
2646       // still needed.
2647       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2648         A.deleteAfterManifest(*I);
2649         return ChangeStatus::CHANGED;
2650       }
2651     }
2652     if (V.use_empty())
2653       return ChangeStatus::UNCHANGED;
2654 
2655     bool UsedAssumedInformation = false;
2656     Optional<Constant *> C =
2657         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2658     if (C.hasValue() && C.getValue())
2659       return ChangeStatus::UNCHANGED;
2660 
2661     // Replace the value with undef as it is dead but keep droppable uses around
2662     // as they provide information we don't want to give up on just yet.
2663     UndefValue &UV = *UndefValue::get(V.getType());
2664     bool AnyChange =
2665         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2666     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2667   }
2668 
2669   /// See AbstractAttribute::trackStatistics()
2670   void trackStatistics() const override {
2671     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2672   }
2673 };
2674 
2675 struct AAIsDeadArgument : public AAIsDeadFloating {
2676   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2677       : AAIsDeadFloating(IRP, A) {}
2678 
2679   /// See AbstractAttribute::initialize(...).
2680   void initialize(Attributor &A) override {
2681     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2682       indicatePessimisticFixpoint();
2683   }
2684 
2685   /// See AbstractAttribute::manifest(...).
2686   ChangeStatus manifest(Attributor &A) override {
2687     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2688     Argument &Arg = *getAssociatedArgument();
2689     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2690       if (A.registerFunctionSignatureRewrite(
2691               Arg, /* ReplacementTypes */ {},
2692               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2693               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2694         Arg.dropDroppableUses();
2695         return ChangeStatus::CHANGED;
2696       }
2697     return Changed;
2698   }
2699 
2700   /// See AbstractAttribute::trackStatistics()
2701   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2702 };
2703 
2704 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2705   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2706       : AAIsDeadValueImpl(IRP, A) {}
2707 
2708   /// See AbstractAttribute::initialize(...).
2709   void initialize(Attributor &A) override {
2710     if (isa<UndefValue>(getAssociatedValue()))
2711       indicatePessimisticFixpoint();
2712   }
2713 
2714   /// See AbstractAttribute::updateImpl(...).
2715   ChangeStatus updateImpl(Attributor &A) override {
2716     // TODO: Once we have call site specific value information we can provide
2717     //       call site specific liveness information and then it makes
2718     //       sense to specialize attributes for call sites arguments instead of
2719     //       redirecting requests to the callee argument.
2720     Argument *Arg = getAssociatedArgument();
2721     if (!Arg)
2722       return indicatePessimisticFixpoint();
2723     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2724     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2725     return clampStateAndIndicateChange(
2726         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2727   }
2728 
2729   /// See AbstractAttribute::manifest(...).
2730   ChangeStatus manifest(Attributor &A) override {
2731     CallBase &CB = cast<CallBase>(getAnchorValue());
2732     Use &U = CB.getArgOperandUse(getArgNo());
2733     assert(!isa<UndefValue>(U.get()) &&
2734            "Expected undef values to be filtered out!");
2735     UndefValue &UV = *UndefValue::get(U->getType());
2736     if (A.changeUseAfterManifest(U, UV))
2737       return ChangeStatus::CHANGED;
2738     return ChangeStatus::UNCHANGED;
2739   }
2740 
2741   /// See AbstractAttribute::trackStatistics()
2742   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2743 };
2744 
2745 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2746   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2747       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2748 
2749   /// See AAIsDead::isAssumedDead().
2750   bool isAssumedDead() const override {
2751     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2752   }
2753 
2754   /// See AbstractAttribute::initialize(...).
2755   void initialize(Attributor &A) override {
2756     if (isa<UndefValue>(getAssociatedValue())) {
2757       indicatePessimisticFixpoint();
2758       return;
2759     }
2760 
2761     // We track this separately as a secondary state.
2762     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2763   }
2764 
2765   /// See AbstractAttribute::updateImpl(...).
2766   ChangeStatus updateImpl(Attributor &A) override {
2767     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2768     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2769       IsAssumedSideEffectFree = false;
2770       Changed = ChangeStatus::CHANGED;
2771     }
2772 
2773     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2774       return indicatePessimisticFixpoint();
2775     return Changed;
2776   }
2777 
2778   /// See AbstractAttribute::trackStatistics()
2779   void trackStatistics() const override {
2780     if (IsAssumedSideEffectFree)
2781       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2782     else
2783       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2784   }
2785 
2786   /// See AbstractAttribute::getAsStr().
2787   const std::string getAsStr() const override {
2788     return isAssumedDead()
2789                ? "assumed-dead"
2790                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2791   }
2792 
2793 private:
2794   bool IsAssumedSideEffectFree;
2795 };
2796 
2797 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2798   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
2799       : AAIsDeadValueImpl(IRP, A) {}
2800 
2801   /// See AbstractAttribute::updateImpl(...).
2802   ChangeStatus updateImpl(Attributor &A) override {
2803 
2804     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
2805                               {Instruction::Ret});
2806 
2807     auto PredForCallSite = [&](AbstractCallSite ACS) {
2808       if (ACS.isCallbackCall() || !ACS.getInstruction())
2809         return false;
2810       return areAllUsesAssumedDead(A, *ACS.getInstruction());
2811     };
2812 
2813     bool AllCallSitesKnown;
2814     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
2815                                 AllCallSitesKnown))
2816       return indicatePessimisticFixpoint();
2817 
2818     return ChangeStatus::UNCHANGED;
2819   }
2820 
2821   /// See AbstractAttribute::manifest(...).
2822   ChangeStatus manifest(Attributor &A) override {
2823     // TODO: Rewrite the signature to return void?
2824     bool AnyChange = false;
2825     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2826     auto RetInstPred = [&](Instruction &I) {
2827       ReturnInst &RI = cast<ReturnInst>(I);
2828       if (!isa<UndefValue>(RI.getReturnValue()))
2829         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2830       return true;
2831     };
2832     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2833     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2834   }
2835 
2836   /// See AbstractAttribute::trackStatistics()
2837   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2838 };
2839 
2840 struct AAIsDeadFunction : public AAIsDead {
2841   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2842 
2843   /// See AbstractAttribute::initialize(...).
2844   void initialize(Attributor &A) override {
2845     const Function *F = getAnchorScope();
2846     if (F && !F->isDeclaration()) {
2847       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2848       assumeLive(A, F->getEntryBlock());
2849     }
2850   }
2851 
2852   /// See AbstractAttribute::getAsStr().
2853   const std::string getAsStr() const override {
2854     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2855            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
2856            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2857            std::to_string(KnownDeadEnds.size()) + "]";
2858   }
2859 
2860   /// See AbstractAttribute::manifest(...).
2861   ChangeStatus manifest(Attributor &A) override {
2862     assert(getState().isValidState() &&
2863            "Attempted to manifest an invalid state!");
2864 
2865     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2866     Function &F = *getAnchorScope();
2867 
2868     if (AssumedLiveBlocks.empty()) {
2869       A.deleteAfterManifest(F);
2870       return ChangeStatus::CHANGED;
2871     }
2872 
2873     // Flag to determine if we can change an invoke to a call assuming the
2874     // callee is nounwind. This is not possible if the personality of the
2875     // function allows to catch asynchronous exceptions.
2876     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2877 
2878     KnownDeadEnds.set_union(ToBeExploredFrom);
2879     for (const Instruction *DeadEndI : KnownDeadEnds) {
2880       auto *CB = dyn_cast<CallBase>(DeadEndI);
2881       if (!CB)
2882         continue;
2883       const auto &NoReturnAA =
2884           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2885       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2886       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2887         continue;
2888 
2889       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
2890         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
2891       else
2892         A.changeToUnreachableAfterManifest(
2893             const_cast<Instruction *>(DeadEndI->getNextNode()));
2894       HasChanged = ChangeStatus::CHANGED;
2895     }
2896 
2897     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
2898     for (BasicBlock &BB : F)
2899       if (!AssumedLiveBlocks.count(&BB)) {
2900         A.deleteAfterManifest(BB);
2901         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
2902       }
2903 
2904     return HasChanged;
2905   }
2906 
2907   /// See AbstractAttribute::updateImpl(...).
2908   ChangeStatus updateImpl(Attributor &A) override;
2909 
2910   /// See AbstractAttribute::trackStatistics()
2911   void trackStatistics() const override {}
2912 
2913   /// Returns true if the function is assumed dead.
2914   bool isAssumedDead() const override { return false; }
2915 
2916   /// See AAIsDead::isKnownDead().
2917   bool isKnownDead() const override { return false; }
2918 
2919   /// See AAIsDead::isAssumedDead(BasicBlock *).
2920   bool isAssumedDead(const BasicBlock *BB) const override {
2921     assert(BB->getParent() == getAnchorScope() &&
2922            "BB must be in the same anchor scope function.");
2923 
2924     if (!getAssumed())
2925       return false;
2926     return !AssumedLiveBlocks.count(BB);
2927   }
2928 
2929   /// See AAIsDead::isKnownDead(BasicBlock *).
2930   bool isKnownDead(const BasicBlock *BB) const override {
2931     return getKnown() && isAssumedDead(BB);
2932   }
2933 
2934   /// See AAIsDead::isAssumed(Instruction *I).
2935   bool isAssumedDead(const Instruction *I) const override {
2936     assert(I->getParent()->getParent() == getAnchorScope() &&
2937            "Instruction must be in the same anchor scope function.");
2938 
2939     if (!getAssumed())
2940       return false;
2941 
2942     // If it is not in AssumedLiveBlocks then it for sure dead.
2943     // Otherwise, it can still be after noreturn call in a live block.
2944     if (!AssumedLiveBlocks.count(I->getParent()))
2945       return true;
2946 
2947     // If it is not after a liveness barrier it is live.
2948     const Instruction *PrevI = I->getPrevNode();
2949     while (PrevI) {
2950       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2951         return true;
2952       PrevI = PrevI->getPrevNode();
2953     }
2954     return false;
2955   }
2956 
2957   /// See AAIsDead::isKnownDead(Instruction *I).
2958   bool isKnownDead(const Instruction *I) const override {
2959     return getKnown() && isAssumedDead(I);
2960   }
2961 
2962   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2963   /// that internal function called from \p BB should now be looked at.
2964   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2965     if (!AssumedLiveBlocks.insert(&BB).second)
2966       return false;
2967 
2968     // We assume that all of BB is (probably) live now and if there are calls to
2969     // internal functions we will assume that those are now live as well. This
2970     // is a performance optimization for blocks with calls to a lot of internal
2971     // functions. It can however cause dead functions to be treated as live.
2972     for (const Instruction &I : BB)
2973       if (const auto *CB = dyn_cast<CallBase>(&I))
2974         if (const Function *F = CB->getCalledFunction())
2975           if (F->hasLocalLinkage())
2976             A.markLiveInternalFunction(*F);
2977     return true;
2978   }
2979 
2980   /// Collection of instructions that need to be explored again, e.g., we
2981   /// did assume they do not transfer control to (one of their) successors.
2982   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
2983 
2984   /// Collection of instructions that are known to not transfer control.
2985   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
2986 
2987   /// Collection of all assumed live BasicBlocks.
2988   DenseSet<const BasicBlock *> AssumedLiveBlocks;
2989 };
2990 
2991 static bool
2992 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
2993                         AbstractAttribute &AA,
2994                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2995   const IRPosition &IPos = IRPosition::callsite_function(CB);
2996 
2997   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
2998   if (NoReturnAA.isAssumedNoReturn())
2999     return !NoReturnAA.isKnownNoReturn();
3000   if (CB.isTerminator())
3001     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3002   else
3003     AliveSuccessors.push_back(CB.getNextNode());
3004   return false;
3005 }
3006 
3007 static bool
3008 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3009                         AbstractAttribute &AA,
3010                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3011   bool UsedAssumedInformation =
3012       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3013 
3014   // First, determine if we can change an invoke to a call assuming the
3015   // callee is nounwind. This is not possible if the personality of the
3016   // function allows to catch asynchronous exceptions.
3017   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3018     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3019   } else {
3020     const IRPosition &IPos = IRPosition::callsite_function(II);
3021     const auto &AANoUnw = A.getAAFor<AANoUnwind>(AA, IPos);
3022     if (AANoUnw.isAssumedNoUnwind()) {
3023       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3024     } else {
3025       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3026     }
3027   }
3028   return UsedAssumedInformation;
3029 }
3030 
3031 static bool
3032 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3033                         AbstractAttribute &AA,
3034                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3035   bool UsedAssumedInformation = false;
3036   if (BI.getNumSuccessors() == 1) {
3037     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3038   } else {
3039     Optional<ConstantInt *> CI = getAssumedConstantInt(
3040         A, *BI.getCondition(), AA, UsedAssumedInformation);
3041     if (!CI.hasValue()) {
3042       // No value yet, assume both edges are dead.
3043     } else if (CI.getValue()) {
3044       const BasicBlock *SuccBB =
3045           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3046       AliveSuccessors.push_back(&SuccBB->front());
3047     } else {
3048       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3049       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3050       UsedAssumedInformation = false;
3051     }
3052   }
3053   return UsedAssumedInformation;
3054 }
3055 
3056 static bool
3057 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3058                         AbstractAttribute &AA,
3059                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3060   bool UsedAssumedInformation = false;
3061   Optional<ConstantInt *> CI =
3062       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3063   if (!CI.hasValue()) {
3064     // No value yet, assume all edges are dead.
3065   } else if (CI.getValue()) {
3066     for (auto &CaseIt : SI.cases()) {
3067       if (CaseIt.getCaseValue() == CI.getValue()) {
3068         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3069         return UsedAssumedInformation;
3070       }
3071     }
3072     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3073     return UsedAssumedInformation;
3074   } else {
3075     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3076       AliveSuccessors.push_back(&SuccBB->front());
3077   }
3078   return UsedAssumedInformation;
3079 }
3080 
3081 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3082   ChangeStatus Change = ChangeStatus::UNCHANGED;
3083 
3084   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3085                     << getAnchorScope()->size() << "] BBs and "
3086                     << ToBeExploredFrom.size() << " exploration points and "
3087                     << KnownDeadEnds.size() << " known dead ends\n");
3088 
3089   // Copy and clear the list of instructions we need to explore from. It is
3090   // refilled with instructions the next update has to look at.
3091   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3092                                                ToBeExploredFrom.end());
3093   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3094 
3095   SmallVector<const Instruction *, 8> AliveSuccessors;
3096   while (!Worklist.empty()) {
3097     const Instruction *I = Worklist.pop_back_val();
3098     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3099 
3100     AliveSuccessors.clear();
3101 
3102     bool UsedAssumedInformation = false;
3103     switch (I->getOpcode()) {
3104     // TODO: look for (assumed) UB to backwards propagate "deadness".
3105     default:
3106       if (I->isTerminator()) {
3107         for (const BasicBlock *SuccBB : successors(I->getParent()))
3108           AliveSuccessors.push_back(&SuccBB->front());
3109       } else {
3110         AliveSuccessors.push_back(I->getNextNode());
3111       }
3112       break;
3113     case Instruction::Call:
3114       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3115                                                        *this, AliveSuccessors);
3116       break;
3117     case Instruction::Invoke:
3118       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3119                                                        *this, AliveSuccessors);
3120       break;
3121     case Instruction::Br:
3122       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3123                                                        *this, AliveSuccessors);
3124       break;
3125     case Instruction::Switch:
3126       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3127                                                        *this, AliveSuccessors);
3128       break;
3129     }
3130 
3131     if (UsedAssumedInformation) {
3132       NewToBeExploredFrom.insert(I);
3133     } else {
3134       Change = ChangeStatus::CHANGED;
3135       if (AliveSuccessors.empty() ||
3136           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3137         KnownDeadEnds.insert(I);
3138     }
3139 
3140     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3141                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3142                       << UsedAssumedInformation << "\n");
3143 
3144     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3145       if (!I->isTerminator()) {
3146         assert(AliveSuccessors.size() == 1 &&
3147                "Non-terminator expected to have a single successor!");
3148         Worklist.push_back(AliveSuccessor);
3149       } else {
3150         if (assumeLive(A, *AliveSuccessor->getParent()))
3151           Worklist.push_back(AliveSuccessor);
3152       }
3153     }
3154   }
3155 
3156   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3157 
3158   // If we know everything is live there is no need to query for liveness.
3159   // Instead, indicating a pessimistic fixpoint will cause the state to be
3160   // "invalid" and all queries to be answered conservatively without lookups.
3161   // To be in this state we have to (1) finished the exploration and (3) not
3162   // discovered any non-trivial dead end and (2) not ruled unreachable code
3163   // dead.
3164   if (ToBeExploredFrom.empty() &&
3165       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3166       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3167         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3168       }))
3169     return indicatePessimisticFixpoint();
3170   return Change;
3171 }
3172 
3173 /// Liveness information for a call sites.
3174 struct AAIsDeadCallSite final : AAIsDeadFunction {
3175   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3176       : AAIsDeadFunction(IRP, A) {}
3177 
3178   /// See AbstractAttribute::initialize(...).
3179   void initialize(Attributor &A) override {
3180     // TODO: Once we have call site specific value information we can provide
3181     //       call site specific liveness information and then it makes
3182     //       sense to specialize attributes for call sites instead of
3183     //       redirecting requests to the callee.
3184     llvm_unreachable("Abstract attributes for liveness are not "
3185                      "supported for call sites yet!");
3186   }
3187 
3188   /// See AbstractAttribute::updateImpl(...).
3189   ChangeStatus updateImpl(Attributor &A) override {
3190     return indicatePessimisticFixpoint();
3191   }
3192 
3193   /// See AbstractAttribute::trackStatistics()
3194   void trackStatistics() const override {}
3195 };
3196 
3197 /// -------------------- Dereferenceable Argument Attribute --------------------
3198 
3199 template <>
3200 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3201                                                      const DerefState &R) {
3202   ChangeStatus CS0 =
3203       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3204   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3205   return CS0 | CS1;
3206 }
3207 
3208 struct AADereferenceableImpl : AADereferenceable {
3209   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3210       : AADereferenceable(IRP, A) {}
3211   using StateType = DerefState;
3212 
3213   /// See AbstractAttribute::initialize(...).
3214   void initialize(Attributor &A) override {
3215     SmallVector<Attribute, 4> Attrs;
3216     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3217              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3218     for (const Attribute &Attr : Attrs)
3219       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3220 
3221     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
3222                                        /* TrackDependence */ false);
3223 
3224     const IRPosition &IRP = this->getIRPosition();
3225     bool IsFnInterface = IRP.isFnInterfaceKind();
3226     Function *FnScope = IRP.getAnchorScope();
3227     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3228       indicatePessimisticFixpoint();
3229       return;
3230     }
3231 
3232     if (Instruction *CtxI = getCtxI())
3233       followUsesInMBEC(*this, A, getState(), *CtxI);
3234   }
3235 
3236   /// See AbstractAttribute::getState()
3237   /// {
3238   StateType &getState() override { return *this; }
3239   const StateType &getState() const override { return *this; }
3240   /// }
3241 
3242   /// Helper function for collecting accessed bytes in must-be-executed-context
3243   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3244                               DerefState &State) {
3245     const Value *UseV = U->get();
3246     if (!UseV->getType()->isPointerTy())
3247       return;
3248 
3249     Type *PtrTy = UseV->getType();
3250     const DataLayout &DL = A.getDataLayout();
3251     int64_t Offset;
3252     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3253             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3254       if (Base == &getAssociatedValue() &&
3255           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3256         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3257         State.addAccessedBytes(Offset, Size);
3258       }
3259     }
3260     return;
3261   }
3262 
3263   /// See followUsesInMBEC
3264   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3265                        AADereferenceable::StateType &State) {
3266     bool IsNonNull = false;
3267     bool TrackUse = false;
3268     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3269         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3270 
3271     addAccessedBytesForUse(A, U, I, State);
3272     State.takeKnownDerefBytesMaximum(DerefBytes);
3273     return TrackUse;
3274   }
3275 
3276   /// See AbstractAttribute::manifest(...).
3277   ChangeStatus manifest(Attributor &A) override {
3278     ChangeStatus Change = AADereferenceable::manifest(A);
3279     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3280       removeAttrs({Attribute::DereferenceableOrNull});
3281       return ChangeStatus::CHANGED;
3282     }
3283     return Change;
3284   }
3285 
3286   void getDeducedAttributes(LLVMContext &Ctx,
3287                             SmallVectorImpl<Attribute> &Attrs) const override {
3288     // TODO: Add *_globally support
3289     if (isAssumedNonNull())
3290       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3291           Ctx, getAssumedDereferenceableBytes()));
3292     else
3293       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3294           Ctx, getAssumedDereferenceableBytes()));
3295   }
3296 
3297   /// See AbstractAttribute::getAsStr().
3298   const std::string getAsStr() const override {
3299     if (!getAssumedDereferenceableBytes())
3300       return "unknown-dereferenceable";
3301     return std::string("dereferenceable") +
3302            (isAssumedNonNull() ? "" : "_or_null") +
3303            (isAssumedGlobal() ? "_globally" : "") + "<" +
3304            std::to_string(getKnownDereferenceableBytes()) + "-" +
3305            std::to_string(getAssumedDereferenceableBytes()) + ">";
3306   }
3307 };
3308 
3309 /// Dereferenceable attribute for a floating value.
3310 struct AADereferenceableFloating : AADereferenceableImpl {
3311   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3312       : AADereferenceableImpl(IRP, A) {}
3313 
3314   /// See AbstractAttribute::updateImpl(...).
3315   ChangeStatus updateImpl(Attributor &A) override {
3316     const DataLayout &DL = A.getDataLayout();
3317 
3318     auto VisitValueCB = [&](Value &V, const Instruction *, DerefState &T,
3319                             bool Stripped) -> bool {
3320       unsigned IdxWidth =
3321           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3322       APInt Offset(IdxWidth, 0);
3323       const Value *Base =
3324           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3325 
3326       const auto &AA =
3327           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3328       int64_t DerefBytes = 0;
3329       if (!Stripped && this == &AA) {
3330         // Use IR information if we did not strip anything.
3331         // TODO: track globally.
3332         bool CanBeNull;
3333         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3334         T.GlobalState.indicatePessimisticFixpoint();
3335       } else {
3336         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3337         DerefBytes = DS.DerefBytesState.getAssumed();
3338         T.GlobalState &= DS.GlobalState;
3339       }
3340 
3341       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3342 
3343       // For now we do not try to "increase" dereferenceability due to negative
3344       // indices as we first have to come up with code to deal with loops and
3345       // for overflows of the dereferenceable bytes.
3346       int64_t OffsetSExt = Offset.getSExtValue();
3347       if (OffsetSExt < 0)
3348         OffsetSExt = 0;
3349 
3350       T.takeAssumedDerefBytesMinimum(
3351           std::max(int64_t(0), DerefBytes - OffsetSExt));
3352 
3353       if (this == &AA) {
3354         if (!Stripped) {
3355           // If nothing was stripped IR information is all we got.
3356           T.takeKnownDerefBytesMaximum(
3357               std::max(int64_t(0), DerefBytes - OffsetSExt));
3358           T.indicatePessimisticFixpoint();
3359         } else if (OffsetSExt > 0) {
3360           // If something was stripped but there is circular reasoning we look
3361           // for the offset. If it is positive we basically decrease the
3362           // dereferenceable bytes in a circluar loop now, which will simply
3363           // drive them down to the known value in a very slow way which we
3364           // can accelerate.
3365           T.indicatePessimisticFixpoint();
3366         }
3367       }
3368 
3369       return T.isValidState();
3370     };
3371 
3372     DerefState T;
3373     if (!genericValueTraversal<AADereferenceable, DerefState>(
3374             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3375       return indicatePessimisticFixpoint();
3376 
3377     return clampStateAndIndicateChange(getState(), T);
3378   }
3379 
3380   /// See AbstractAttribute::trackStatistics()
3381   void trackStatistics() const override {
3382     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3383   }
3384 };
3385 
3386 /// Dereferenceable attribute for a return value.
3387 struct AADereferenceableReturned final
3388     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3389   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3390       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3391             IRP, A) {}
3392 
3393   /// See AbstractAttribute::trackStatistics()
3394   void trackStatistics() const override {
3395     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3396   }
3397 };
3398 
3399 /// Dereferenceable attribute for an argument
3400 struct AADereferenceableArgument final
3401     : AAArgumentFromCallSiteArguments<AADereferenceable,
3402                                       AADereferenceableImpl> {
3403   using Base =
3404       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3405   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3406       : Base(IRP, A) {}
3407 
3408   /// See AbstractAttribute::trackStatistics()
3409   void trackStatistics() const override {
3410     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3411   }
3412 };
3413 
3414 /// Dereferenceable attribute for a call site argument.
3415 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3416   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3417       : AADereferenceableFloating(IRP, A) {}
3418 
3419   /// See AbstractAttribute::trackStatistics()
3420   void trackStatistics() const override {
3421     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3422   }
3423 };
3424 
3425 /// Dereferenceable attribute deduction for a call site return value.
3426 struct AADereferenceableCallSiteReturned final
3427     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3428   using Base =
3429       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3430   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3431       : Base(IRP, A) {}
3432 
3433   /// See AbstractAttribute::trackStatistics()
3434   void trackStatistics() const override {
3435     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3436   }
3437 };
3438 
3439 // ------------------------ Align Argument Attribute ------------------------
3440 
3441 /// \p Ptr is accessed so we can get alignment information if the ABI requires
3442 /// the element type to be aligned.
3443 static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr,
3444                                                    const DataLayout &DL) {
3445   MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL);
3446   Type *ElementTy = Ptr->getType()->getPointerElementType();
3447   if (ElementTy->isSized())
3448     KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy));
3449   return KnownAlignment;
3450 }
3451 
3452 static unsigned getKnownAlignForUse(Attributor &A,
3453                                     AbstractAttribute &QueryingAA,
3454                                     Value &AssociatedValue, const Use *U,
3455                                     const Instruction *I, bool &TrackUse) {
3456   // We need to follow common pointer manipulation uses to the accesses they
3457   // feed into.
3458   if (isa<CastInst>(I)) {
3459     // Follow all but ptr2int casts.
3460     TrackUse = !isa<PtrToIntInst>(I);
3461     return 0;
3462   }
3463   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3464     if (GEP->hasAllConstantIndices()) {
3465       TrackUse = true;
3466       return 0;
3467     }
3468   }
3469 
3470   MaybeAlign MA;
3471   if (const auto *CB = dyn_cast<CallBase>(I)) {
3472     if (CB->isBundleOperand(U) || CB->isCallee(U))
3473       return 0;
3474 
3475     unsigned ArgNo = CB->getArgOperandNo(U);
3476     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3477     // As long as we only use known information there is no need to track
3478     // dependences here.
3479     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3480                                         /* TrackDependence */ false);
3481     MA = MaybeAlign(AlignAA.getKnownAlign());
3482   }
3483 
3484   const DataLayout &DL = A.getDataLayout();
3485   const Value *UseV = U->get();
3486   if (auto *SI = dyn_cast<StoreInst>(I)) {
3487     if (SI->getPointerOperand() == UseV) {
3488       if (unsigned SIAlign = SI->getAlignment())
3489         MA = MaybeAlign(SIAlign);
3490       else
3491         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3492     }
3493   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3494     if (LI->getPointerOperand() == UseV) {
3495       if (unsigned LIAlign = LI->getAlignment())
3496         MA = MaybeAlign(LIAlign);
3497       else
3498         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3499     }
3500   }
3501 
3502   if (!MA.hasValue() || MA <= 1)
3503     return 0;
3504 
3505   unsigned Alignment = MA->value();
3506   int64_t Offset;
3507 
3508   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3509     if (Base == &AssociatedValue) {
3510       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3511       // So we can say that the maximum power of two which is a divisor of
3512       // gcd(Offset, Alignment) is an alignment.
3513 
3514       uint32_t gcd =
3515           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3516       Alignment = llvm::PowerOf2Floor(gcd);
3517     }
3518   }
3519 
3520   return Alignment;
3521 }
3522 
3523 struct AAAlignImpl : AAAlign {
3524   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3525 
3526   /// See AbstractAttribute::initialize(...).
3527   void initialize(Attributor &A) override {
3528     SmallVector<Attribute, 4> Attrs;
3529     getAttrs({Attribute::Alignment}, Attrs);
3530     for (const Attribute &Attr : Attrs)
3531       takeKnownMaximum(Attr.getValueAsInt());
3532 
3533     if (getIRPosition().isFnInterfaceKind() &&
3534         (!getAnchorScope() ||
3535          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3536       indicatePessimisticFixpoint();
3537       return;
3538     }
3539 
3540     if (Instruction *CtxI = getCtxI())
3541       followUsesInMBEC(*this, A, getState(), *CtxI);
3542   }
3543 
3544   /// See AbstractAttribute::manifest(...).
3545   ChangeStatus manifest(Attributor &A) override {
3546     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3547 
3548     // Check for users that allow alignment annotations.
3549     Value &AssociatedValue = getAssociatedValue();
3550     for (const Use &U : AssociatedValue.uses()) {
3551       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3552         if (SI->getPointerOperand() == &AssociatedValue)
3553           if (SI->getAlignment() < getAssumedAlign()) {
3554             STATS_DECLTRACK(AAAlign, Store,
3555                             "Number of times alignment added to a store");
3556             SI->setAlignment(Align(getAssumedAlign()));
3557             LoadStoreChanged = ChangeStatus::CHANGED;
3558           }
3559       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3560         if (LI->getPointerOperand() == &AssociatedValue)
3561           if (LI->getAlignment() < getAssumedAlign()) {
3562             LI->setAlignment(Align(getAssumedAlign()));
3563             STATS_DECLTRACK(AAAlign, Load,
3564                             "Number of times alignment added to a load");
3565             LoadStoreChanged = ChangeStatus::CHANGED;
3566           }
3567       }
3568     }
3569 
3570     ChangeStatus Changed = AAAlign::manifest(A);
3571 
3572     MaybeAlign InheritAlign =
3573         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3574     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3575       return LoadStoreChanged;
3576     return Changed | LoadStoreChanged;
3577   }
3578 
3579   // TODO: Provide a helper to determine the implied ABI alignment and check in
3580   //       the existing manifest method and a new one for AAAlignImpl that value
3581   //       to avoid making the alignment explicit if it did not improve.
3582 
3583   /// See AbstractAttribute::getDeducedAttributes
3584   virtual void
3585   getDeducedAttributes(LLVMContext &Ctx,
3586                        SmallVectorImpl<Attribute> &Attrs) const override {
3587     if (getAssumedAlign() > 1)
3588       Attrs.emplace_back(
3589           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3590   }
3591 
3592   /// See followUsesInMBEC
3593   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3594                        AAAlign::StateType &State) {
3595     bool TrackUse = false;
3596 
3597     unsigned int KnownAlign =
3598         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3599     State.takeKnownMaximum(KnownAlign);
3600 
3601     return TrackUse;
3602   }
3603 
3604   /// See AbstractAttribute::getAsStr().
3605   const std::string getAsStr() const override {
3606     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3607                                 "-" + std::to_string(getAssumedAlign()) + ">")
3608                              : "unknown-align";
3609   }
3610 };
3611 
3612 /// Align attribute for a floating value.
3613 struct AAAlignFloating : AAAlignImpl {
3614   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3615 
3616   /// See AbstractAttribute::updateImpl(...).
3617   ChangeStatus updateImpl(Attributor &A) override {
3618     const DataLayout &DL = A.getDataLayout();
3619 
3620     auto VisitValueCB = [&](Value &V, const Instruction *,
3621                             AAAlign::StateType &T, bool Stripped) -> bool {
3622       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3623       if (!Stripped && this == &AA) {
3624         // Use only IR information if we did not strip anything.
3625         const MaybeAlign PA = V.getPointerAlignment(DL);
3626         T.takeKnownMaximum(PA ? PA->value() : 0);
3627         T.indicatePessimisticFixpoint();
3628       } else {
3629         // Use abstract attribute information.
3630         const AAAlign::StateType &DS =
3631             static_cast<const AAAlign::StateType &>(AA.getState());
3632         T ^= DS;
3633       }
3634       return T.isValidState();
3635     };
3636 
3637     StateType T;
3638     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3639                                                    VisitValueCB, getCtxI()))
3640       return indicatePessimisticFixpoint();
3641 
3642     // TODO: If we know we visited all incoming values, thus no are assumed
3643     // dead, we can take the known information from the state T.
3644     return clampStateAndIndicateChange(getState(), T);
3645   }
3646 
3647   /// See AbstractAttribute::trackStatistics()
3648   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3649 };
3650 
3651 /// Align attribute for function return value.
3652 struct AAAlignReturned final
3653     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3654   AAAlignReturned(const IRPosition &IRP, Attributor &A)
3655       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {}
3656 
3657   /// See AbstractAttribute::trackStatistics()
3658   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3659 };
3660 
3661 /// Align attribute for function argument.
3662 struct AAAlignArgument final
3663     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3664   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3665   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3666 
3667   /// See AbstractAttribute::manifest(...).
3668   ChangeStatus manifest(Attributor &A) override {
3669     // If the associated argument is involved in a must-tail call we give up
3670     // because we would need to keep the argument alignments of caller and
3671     // callee in-sync. Just does not seem worth the trouble right now.
3672     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3673       return ChangeStatus::UNCHANGED;
3674     return Base::manifest(A);
3675   }
3676 
3677   /// See AbstractAttribute::trackStatistics()
3678   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3679 };
3680 
3681 struct AAAlignCallSiteArgument final : AAAlignFloating {
3682   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3683       : AAAlignFloating(IRP, A) {}
3684 
3685   /// See AbstractAttribute::manifest(...).
3686   ChangeStatus manifest(Attributor &A) override {
3687     // If the associated argument is involved in a must-tail call we give up
3688     // because we would need to keep the argument alignments of caller and
3689     // callee in-sync. Just does not seem worth the trouble right now.
3690     if (Argument *Arg = getAssociatedArgument())
3691       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3692         return ChangeStatus::UNCHANGED;
3693     ChangeStatus Changed = AAAlignImpl::manifest(A);
3694     MaybeAlign InheritAlign =
3695         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3696     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3697       Changed = ChangeStatus::UNCHANGED;
3698     return Changed;
3699   }
3700 
3701   /// See AbstractAttribute::updateImpl(Attributor &A).
3702   ChangeStatus updateImpl(Attributor &A) override {
3703     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3704     if (Argument *Arg = getAssociatedArgument()) {
3705       // We only take known information from the argument
3706       // so we do not need to track a dependence.
3707       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3708           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3709       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3710     }
3711     return Changed;
3712   }
3713 
3714   /// See AbstractAttribute::trackStatistics()
3715   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3716 };
3717 
3718 /// Align attribute deduction for a call site return value.
3719 struct AAAlignCallSiteReturned final
3720     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3721   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3722   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3723       : Base(IRP, A) {}
3724 
3725   /// See AbstractAttribute::initialize(...).
3726   void initialize(Attributor &A) override {
3727     Base::initialize(A);
3728     Function *F = getAssociatedFunction();
3729     if (!F)
3730       indicatePessimisticFixpoint();
3731   }
3732 
3733   /// See AbstractAttribute::trackStatistics()
3734   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3735 };
3736 
3737 /// ------------------ Function No-Return Attribute ----------------------------
3738 struct AANoReturnImpl : public AANoReturn {
3739   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3740 
3741   /// See AbstractAttribute::initialize(...).
3742   void initialize(Attributor &A) override {
3743     AANoReturn::initialize(A);
3744     Function *F = getAssociatedFunction();
3745     if (!F)
3746       indicatePessimisticFixpoint();
3747   }
3748 
3749   /// See AbstractAttribute::getAsStr().
3750   const std::string getAsStr() const override {
3751     return getAssumed() ? "noreturn" : "may-return";
3752   }
3753 
3754   /// See AbstractAttribute::updateImpl(Attributor &A).
3755   virtual ChangeStatus updateImpl(Attributor &A) override {
3756     auto CheckForNoReturn = [](Instruction &) { return false; };
3757     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3758                                    {(unsigned)Instruction::Ret}))
3759       return indicatePessimisticFixpoint();
3760     return ChangeStatus::UNCHANGED;
3761   }
3762 };
3763 
3764 struct AANoReturnFunction final : AANoReturnImpl {
3765   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
3766       : AANoReturnImpl(IRP, A) {}
3767 
3768   /// See AbstractAttribute::trackStatistics()
3769   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3770 };
3771 
3772 /// NoReturn attribute deduction for a call sites.
3773 struct AANoReturnCallSite final : AANoReturnImpl {
3774   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
3775       : AANoReturnImpl(IRP, A) {}
3776 
3777   /// See AbstractAttribute::updateImpl(...).
3778   ChangeStatus updateImpl(Attributor &A) override {
3779     // TODO: Once we have call site specific value information we can provide
3780     //       call site specific liveness information and then it makes
3781     //       sense to specialize attributes for call sites arguments instead of
3782     //       redirecting requests to the callee argument.
3783     Function *F = getAssociatedFunction();
3784     const IRPosition &FnPos = IRPosition::function(*F);
3785     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3786     return clampStateAndIndicateChange(
3787         getState(),
3788         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3789   }
3790 
3791   /// See AbstractAttribute::trackStatistics()
3792   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3793 };
3794 
3795 /// ----------------------- Variable Capturing ---------------------------------
3796 
3797 /// A class to hold the state of for no-capture attributes.
3798 struct AANoCaptureImpl : public AANoCapture {
3799   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
3800 
3801   /// See AbstractAttribute::initialize(...).
3802   void initialize(Attributor &A) override {
3803     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3804       indicateOptimisticFixpoint();
3805       return;
3806     }
3807     Function *AnchorScope = getAnchorScope();
3808     if (isFnInterfaceKind() &&
3809         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3810       indicatePessimisticFixpoint();
3811       return;
3812     }
3813 
3814     // You cannot "capture" null in the default address space.
3815     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3816         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3817       indicateOptimisticFixpoint();
3818       return;
3819     }
3820 
3821     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3822 
3823     // Check what state the associated function can actually capture.
3824     if (F)
3825       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3826     else
3827       indicatePessimisticFixpoint();
3828   }
3829 
3830   /// See AbstractAttribute::updateImpl(...).
3831   ChangeStatus updateImpl(Attributor &A) override;
3832 
3833   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3834   virtual void
3835   getDeducedAttributes(LLVMContext &Ctx,
3836                        SmallVectorImpl<Attribute> &Attrs) const override {
3837     if (!isAssumedNoCaptureMaybeReturned())
3838       return;
3839 
3840     if (getArgNo() >= 0) {
3841       if (isAssumedNoCapture())
3842         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3843       else if (ManifestInternal)
3844         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3845     }
3846   }
3847 
3848   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3849   /// depending on the ability of the function associated with \p IRP to capture
3850   /// state in memory and through "returning/throwing", respectively.
3851   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3852                                                    const Function &F,
3853                                                    BitIntegerState &State) {
3854     // TODO: Once we have memory behavior attributes we should use them here.
3855 
3856     // If we know we cannot communicate or write to memory, we do not care about
3857     // ptr2int anymore.
3858     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3859         F.getReturnType()->isVoidTy()) {
3860       State.addKnownBits(NO_CAPTURE);
3861       return;
3862     }
3863 
3864     // A function cannot capture state in memory if it only reads memory, it can
3865     // however return/throw state and the state might be influenced by the
3866     // pointer value, e.g., loading from a returned pointer might reveal a bit.
3867     if (F.onlyReadsMemory())
3868       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3869 
3870     // A function cannot communicate state back if it does not through
3871     // exceptions and doesn not return values.
3872     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3873       State.addKnownBits(NOT_CAPTURED_IN_RET);
3874 
3875     // Check existing "returned" attributes.
3876     int ArgNo = IRP.getArgNo();
3877     if (F.doesNotThrow() && ArgNo >= 0) {
3878       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3879         if (F.hasParamAttribute(u, Attribute::Returned)) {
3880           if (u == unsigned(ArgNo))
3881             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3882           else if (F.onlyReadsMemory())
3883             State.addKnownBits(NO_CAPTURE);
3884           else
3885             State.addKnownBits(NOT_CAPTURED_IN_RET);
3886           break;
3887         }
3888     }
3889   }
3890 
3891   /// See AbstractState::getAsStr().
3892   const std::string getAsStr() const override {
3893     if (isKnownNoCapture())
3894       return "known not-captured";
3895     if (isAssumedNoCapture())
3896       return "assumed not-captured";
3897     if (isKnownNoCaptureMaybeReturned())
3898       return "known not-captured-maybe-returned";
3899     if (isAssumedNoCaptureMaybeReturned())
3900       return "assumed not-captured-maybe-returned";
3901     return "assumed-captured";
3902   }
3903 };
3904 
3905 /// Attributor-aware capture tracker.
3906 struct AACaptureUseTracker final : public CaptureTracker {
3907 
3908   /// Create a capture tracker that can lookup in-flight abstract attributes
3909   /// through the Attributor \p A.
3910   ///
3911   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3912   /// search is stopped. If a use leads to a return instruction,
3913   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3914   /// If a use leads to a ptr2int which may capture the value,
3915   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3916   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3917   /// set. All values in \p PotentialCopies are later tracked as well. For every
3918   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3919   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3920   /// conservatively set to true.
3921   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3922                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3923                       SmallVectorImpl<const Value *> &PotentialCopies,
3924                       unsigned &RemainingUsesToExplore)
3925       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3926         PotentialCopies(PotentialCopies),
3927         RemainingUsesToExplore(RemainingUsesToExplore) {}
3928 
3929   /// Determine if \p V maybe captured. *Also updates the state!*
3930   bool valueMayBeCaptured(const Value *V) {
3931     if (V->getType()->isPointerTy()) {
3932       PointerMayBeCaptured(V, this);
3933     } else {
3934       State.indicatePessimisticFixpoint();
3935     }
3936     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3937   }
3938 
3939   /// See CaptureTracker::tooManyUses().
3940   void tooManyUses() override {
3941     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3942   }
3943 
3944   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3945     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3946       return true;
3947     const auto &DerefAA = A.getAAFor<AADereferenceable>(
3948         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
3949         DepClassTy::OPTIONAL);
3950     return DerefAA.getAssumedDereferenceableBytes();
3951   }
3952 
3953   /// See CaptureTracker::captured(...).
3954   bool captured(const Use *U) override {
3955     Instruction *UInst = cast<Instruction>(U->getUser());
3956     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3957                       << "\n");
3958 
3959     // Because we may reuse the tracker multiple times we keep track of the
3960     // number of explored uses ourselves as well.
3961     if (RemainingUsesToExplore-- == 0) {
3962       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3963       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3964                           /* Return */ true);
3965     }
3966 
3967     // Deal with ptr2int by following uses.
3968     if (isa<PtrToIntInst>(UInst)) {
3969       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3970       return valueMayBeCaptured(UInst);
3971     }
3972 
3973     // Explicitly catch return instructions.
3974     if (isa<ReturnInst>(UInst))
3975       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3976                           /* Return */ true);
3977 
3978     // For now we only use special logic for call sites. However, the tracker
3979     // itself knows about a lot of other non-capturing cases already.
3980     auto *CB = dyn_cast<CallBase>(UInst);
3981     if (!CB || !CB->isArgOperand(U))
3982       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3983                           /* Return */ true);
3984 
3985     unsigned ArgNo = CB->getArgOperandNo(U);
3986     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
3987     // If we have a abstract no-capture attribute for the argument we can use
3988     // it to justify a non-capture attribute here. This allows recursion!
3989     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3990     if (ArgNoCaptureAA.isAssumedNoCapture())
3991       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3992                           /* Return */ false);
3993     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3994       addPotentialCopy(*CB);
3995       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3996                           /* Return */ false);
3997     }
3998 
3999     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4000     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4001                         /* Return */ true);
4002   }
4003 
4004   /// Register \p CS as potential copy of the value we are checking.
4005   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4006 
4007   /// See CaptureTracker::shouldExplore(...).
4008   bool shouldExplore(const Use *U) override {
4009     // Check liveness and ignore droppable users.
4010     return !U->getUser()->isDroppable() &&
4011            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4012   }
4013 
4014   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4015   /// \p CapturedInRet, then return the appropriate value for use in the
4016   /// CaptureTracker::captured() interface.
4017   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4018                     bool CapturedInRet) {
4019     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4020                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4021     if (CapturedInMem)
4022       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4023     if (CapturedInInt)
4024       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4025     if (CapturedInRet)
4026       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4027     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4028   }
4029 
4030 private:
4031   /// The attributor providing in-flight abstract attributes.
4032   Attributor &A;
4033 
4034   /// The abstract attribute currently updated.
4035   AANoCapture &NoCaptureAA;
4036 
4037   /// The abstract liveness state.
4038   const AAIsDead &IsDeadAA;
4039 
4040   /// The state currently updated.
4041   AANoCapture::StateType &State;
4042 
4043   /// Set of potential copies of the tracked value.
4044   SmallVectorImpl<const Value *> &PotentialCopies;
4045 
4046   /// Global counter to limit the number of explored uses.
4047   unsigned &RemainingUsesToExplore;
4048 };
4049 
4050 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4051   const IRPosition &IRP = getIRPosition();
4052   const Value *V =
4053       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4054   if (!V)
4055     return indicatePessimisticFixpoint();
4056 
4057   const Function *F =
4058       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4059   assert(F && "Expected a function!");
4060   const IRPosition &FnPos = IRPosition::function(*F);
4061   const auto &IsDeadAA =
4062       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4063 
4064   AANoCapture::StateType T;
4065 
4066   // Readonly means we cannot capture through memory.
4067   const auto &FnMemAA =
4068       A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false);
4069   if (FnMemAA.isAssumedReadOnly()) {
4070     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4071     if (FnMemAA.isKnownReadOnly())
4072       addKnownBits(NOT_CAPTURED_IN_MEM);
4073     else
4074       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4075   }
4076 
4077   // Make sure all returned values are different than the underlying value.
4078   // TODO: we could do this in a more sophisticated way inside
4079   //       AAReturnedValues, e.g., track all values that escape through returns
4080   //       directly somehow.
4081   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4082     bool SeenConstant = false;
4083     for (auto &It : RVAA.returned_values()) {
4084       if (isa<Constant>(It.first)) {
4085         if (SeenConstant)
4086           return false;
4087         SeenConstant = true;
4088       } else if (!isa<Argument>(It.first) ||
4089                  It.first == getAssociatedArgument())
4090         return false;
4091     }
4092     return true;
4093   };
4094 
4095   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4096       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4097   if (NoUnwindAA.isAssumedNoUnwind()) {
4098     bool IsVoidTy = F->getReturnType()->isVoidTy();
4099     const AAReturnedValues *RVAA =
4100         IsVoidTy ? nullptr
4101                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4102                                                  /* TrackDependence */ true,
4103                                                  DepClassTy::OPTIONAL);
4104     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4105       T.addKnownBits(NOT_CAPTURED_IN_RET);
4106       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4107         return ChangeStatus::UNCHANGED;
4108       if (NoUnwindAA.isKnownNoUnwind() &&
4109           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4110         addKnownBits(NOT_CAPTURED_IN_RET);
4111         if (isKnown(NOT_CAPTURED_IN_MEM))
4112           return indicateOptimisticFixpoint();
4113       }
4114     }
4115   }
4116 
4117   // Use the CaptureTracker interface and logic with the specialized tracker,
4118   // defined in AACaptureUseTracker, that can look at in-flight abstract
4119   // attributes and directly updates the assumed state.
4120   SmallVector<const Value *, 4> PotentialCopies;
4121   unsigned RemainingUsesToExplore =
4122       getDefaultMaxUsesToExploreForCaptureTracking();
4123   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4124                               RemainingUsesToExplore);
4125 
4126   // Check all potential copies of the associated value until we can assume
4127   // none will be captured or we have to assume at least one might be.
4128   unsigned Idx = 0;
4129   PotentialCopies.push_back(V);
4130   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4131     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4132 
4133   AANoCapture::StateType &S = getState();
4134   auto Assumed = S.getAssumed();
4135   S.intersectAssumedBits(T.getAssumed());
4136   if (!isAssumedNoCaptureMaybeReturned())
4137     return indicatePessimisticFixpoint();
4138   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4139                                    : ChangeStatus::CHANGED;
4140 }
4141 
4142 /// NoCapture attribute for function arguments.
4143 struct AANoCaptureArgument final : AANoCaptureImpl {
4144   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4145       : AANoCaptureImpl(IRP, A) {}
4146 
4147   /// See AbstractAttribute::trackStatistics()
4148   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4149 };
4150 
4151 /// NoCapture attribute for call site arguments.
4152 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4153   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4154       : AANoCaptureImpl(IRP, A) {}
4155 
4156   /// See AbstractAttribute::initialize(...).
4157   void initialize(Attributor &A) override {
4158     if (Argument *Arg = getAssociatedArgument())
4159       if (Arg->hasByValAttr())
4160         indicateOptimisticFixpoint();
4161     AANoCaptureImpl::initialize(A);
4162   }
4163 
4164   /// See AbstractAttribute::updateImpl(...).
4165   ChangeStatus updateImpl(Attributor &A) override {
4166     // TODO: Once we have call site specific value information we can provide
4167     //       call site specific liveness information and then it makes
4168     //       sense to specialize attributes for call sites arguments instead of
4169     //       redirecting requests to the callee argument.
4170     Argument *Arg = getAssociatedArgument();
4171     if (!Arg)
4172       return indicatePessimisticFixpoint();
4173     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4174     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4175     return clampStateAndIndicateChange(
4176         getState(),
4177         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4178   }
4179 
4180   /// See AbstractAttribute::trackStatistics()
4181   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4182 };
4183 
4184 /// NoCapture attribute for floating values.
4185 struct AANoCaptureFloating final : AANoCaptureImpl {
4186   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4187       : AANoCaptureImpl(IRP, A) {}
4188 
4189   /// See AbstractAttribute::trackStatistics()
4190   void trackStatistics() const override {
4191     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4192   }
4193 };
4194 
4195 /// NoCapture attribute for function return value.
4196 struct AANoCaptureReturned final : AANoCaptureImpl {
4197   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4198       : AANoCaptureImpl(IRP, A) {
4199     llvm_unreachable("NoCapture is not applicable to function returns!");
4200   }
4201 
4202   /// See AbstractAttribute::initialize(...).
4203   void initialize(Attributor &A) override {
4204     llvm_unreachable("NoCapture is not applicable to function returns!");
4205   }
4206 
4207   /// See AbstractAttribute::updateImpl(...).
4208   ChangeStatus updateImpl(Attributor &A) override {
4209     llvm_unreachable("NoCapture is not applicable to function returns!");
4210   }
4211 
4212   /// See AbstractAttribute::trackStatistics()
4213   void trackStatistics() const override {}
4214 };
4215 
4216 /// NoCapture attribute deduction for a call site return value.
4217 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4218   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4219       : AANoCaptureImpl(IRP, A) {}
4220 
4221   /// See AbstractAttribute::trackStatistics()
4222   void trackStatistics() const override {
4223     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4224   }
4225 };
4226 
4227 /// ------------------ Value Simplify Attribute ----------------------------
4228 struct AAValueSimplifyImpl : AAValueSimplify {
4229   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4230       : AAValueSimplify(IRP, A) {}
4231 
4232   /// See AbstractAttribute::initialize(...).
4233   void initialize(Attributor &A) override {
4234     if (getAssociatedValue().getType()->isVoidTy())
4235       indicatePessimisticFixpoint();
4236   }
4237 
4238   /// See AbstractAttribute::getAsStr().
4239   const std::string getAsStr() const override {
4240     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4241                         : "not-simple";
4242   }
4243 
4244   /// See AbstractAttribute::trackStatistics()
4245   void trackStatistics() const override {}
4246 
4247   /// See AAValueSimplify::getAssumedSimplifiedValue()
4248   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4249     if (!getAssumed())
4250       return const_cast<Value *>(&getAssociatedValue());
4251     return SimplifiedAssociatedValue;
4252   }
4253 
4254   /// Helper function for querying AAValueSimplify and updating candicate.
4255   /// \param QueryingValue Value trying to unify with SimplifiedValue
4256   /// \param AccumulatedSimplifiedValue Current simplification result.
4257   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4258                              Value &QueryingValue,
4259                              Optional<Value *> &AccumulatedSimplifiedValue) {
4260     // FIXME: Add a typecast support.
4261 
4262     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4263         QueryingAA, IRPosition::value(QueryingValue));
4264 
4265     Optional<Value *> QueryingValueSimplified =
4266         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4267 
4268     if (!QueryingValueSimplified.hasValue())
4269       return true;
4270 
4271     if (!QueryingValueSimplified.getValue())
4272       return false;
4273 
4274     Value &QueryingValueSimplifiedUnwrapped =
4275         *QueryingValueSimplified.getValue();
4276 
4277     if (AccumulatedSimplifiedValue.hasValue() &&
4278         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4279         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4280       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4281     if (AccumulatedSimplifiedValue.hasValue() &&
4282         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4283       return true;
4284 
4285     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4286                       << " is assumed to be "
4287                       << QueryingValueSimplifiedUnwrapped << "\n");
4288 
4289     AccumulatedSimplifiedValue = QueryingValueSimplified;
4290     return true;
4291   }
4292 
4293   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4294     if (!getAssociatedValue().getType()->isIntegerTy())
4295       return false;
4296 
4297     const auto &ValueConstantRangeAA =
4298         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4299 
4300     Optional<ConstantInt *> COpt =
4301         ValueConstantRangeAA.getAssumedConstantInt(A);
4302     if (COpt.hasValue()) {
4303       if (auto *C = COpt.getValue())
4304         SimplifiedAssociatedValue = C;
4305       else
4306         return false;
4307     } else {
4308       SimplifiedAssociatedValue = llvm::None;
4309     }
4310     return true;
4311   }
4312 
4313   /// See AbstractAttribute::manifest(...).
4314   ChangeStatus manifest(Attributor &A) override {
4315     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4316 
4317     if (SimplifiedAssociatedValue.hasValue() &&
4318         !SimplifiedAssociatedValue.getValue())
4319       return Changed;
4320 
4321     Value &V = getAssociatedValue();
4322     auto *C = SimplifiedAssociatedValue.hasValue()
4323                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4324                   : UndefValue::get(V.getType());
4325     if (C) {
4326       // We can replace the AssociatedValue with the constant.
4327       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4328         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4329                           << " :: " << *this << "\n");
4330         if (A.changeValueAfterManifest(V, *C))
4331           Changed = ChangeStatus::CHANGED;
4332       }
4333     }
4334 
4335     return Changed | AAValueSimplify::manifest(A);
4336   }
4337 
4338   /// See AbstractState::indicatePessimisticFixpoint(...).
4339   ChangeStatus indicatePessimisticFixpoint() override {
4340     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4341     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4342     SimplifiedAssociatedValue = &getAssociatedValue();
4343     indicateOptimisticFixpoint();
4344     return ChangeStatus::CHANGED;
4345   }
4346 
4347 protected:
4348   // An assumed simplified value. Initially, it is set to Optional::None, which
4349   // means that the value is not clear under current assumption. If in the
4350   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4351   // returns orignal associated value.
4352   Optional<Value *> SimplifiedAssociatedValue;
4353 };
4354 
4355 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4356   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4357       : AAValueSimplifyImpl(IRP, A) {}
4358 
4359   void initialize(Attributor &A) override {
4360     AAValueSimplifyImpl::initialize(A);
4361     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4362       indicatePessimisticFixpoint();
4363     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4364                 /* IgnoreSubsumingPositions */ true))
4365       indicatePessimisticFixpoint();
4366 
4367     // FIXME: This is a hack to prevent us from propagating function poiner in
4368     // the new pass manager CGSCC pass as it creates call edges the
4369     // CallGraphUpdater cannot handle yet.
4370     Value &V = getAssociatedValue();
4371     if (V.getType()->isPointerTy() &&
4372         V.getType()->getPointerElementType()->isFunctionTy() &&
4373         !A.isModulePass())
4374       indicatePessimisticFixpoint();
4375   }
4376 
4377   /// See AbstractAttribute::updateImpl(...).
4378   ChangeStatus updateImpl(Attributor &A) override {
4379     // Byval is only replacable if it is readonly otherwise we would write into
4380     // the replaced value and not the copy that byval creates implicitly.
4381     Argument *Arg = getAssociatedArgument();
4382     if (Arg->hasByValAttr()) {
4383       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4384       //       there is no race by not copying a constant byval.
4385       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4386       if (!MemAA.isAssumedReadOnly())
4387         return indicatePessimisticFixpoint();
4388     }
4389 
4390     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4391 
4392     auto PredForCallSite = [&](AbstractCallSite ACS) {
4393       const IRPosition &ACSArgPos =
4394           IRPosition::callsite_argument(ACS, getArgNo());
4395       // Check if a coresponding argument was found or if it is on not
4396       // associated (which can happen for callback calls).
4397       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4398         return false;
4399 
4400       // We can only propagate thread independent values through callbacks.
4401       // This is different to direct/indirect call sites because for them we
4402       // know the thread executing the caller and callee is the same. For
4403       // callbacks this is not guaranteed, thus a thread dependent value could
4404       // be different for the caller and callee, making it invalid to propagate.
4405       Value &ArgOp = ACSArgPos.getAssociatedValue();
4406       if (ACS.isCallbackCall())
4407         if (auto *C = dyn_cast<Constant>(&ArgOp))
4408           if (C->isThreadDependent())
4409             return false;
4410       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4411     };
4412 
4413     bool AllCallSitesKnown;
4414     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4415                                 AllCallSitesKnown))
4416       if (!askSimplifiedValueForAAValueConstantRange(A))
4417         return indicatePessimisticFixpoint();
4418 
4419     // If a candicate was found in this update, return CHANGED.
4420     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4421                ? ChangeStatus::UNCHANGED
4422                : ChangeStatus ::CHANGED;
4423   }
4424 
4425   /// See AbstractAttribute::trackStatistics()
4426   void trackStatistics() const override {
4427     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4428   }
4429 };
4430 
4431 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4432   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4433       : AAValueSimplifyImpl(IRP, A) {}
4434 
4435   /// See AbstractAttribute::updateImpl(...).
4436   ChangeStatus updateImpl(Attributor &A) override {
4437     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4438 
4439     auto PredForReturned = [&](Value &V) {
4440       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4441     };
4442 
4443     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4444       if (!askSimplifiedValueForAAValueConstantRange(A))
4445         return indicatePessimisticFixpoint();
4446 
4447     // If a candicate was found in this update, return CHANGED.
4448     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4449                ? ChangeStatus::UNCHANGED
4450                : ChangeStatus ::CHANGED;
4451   }
4452 
4453   ChangeStatus manifest(Attributor &A) override {
4454     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4455 
4456     if (SimplifiedAssociatedValue.hasValue() &&
4457         !SimplifiedAssociatedValue.getValue())
4458       return Changed;
4459 
4460     Value &V = getAssociatedValue();
4461     auto *C = SimplifiedAssociatedValue.hasValue()
4462                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4463                   : UndefValue::get(V.getType());
4464     if (C) {
4465       auto PredForReturned =
4466           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4467             // We can replace the AssociatedValue with the constant.
4468             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4469               return true;
4470 
4471             for (ReturnInst *RI : RetInsts) {
4472               if (RI->getFunction() != getAnchorScope())
4473                 continue;
4474               auto *RC = C;
4475               if (RC->getType() != RI->getReturnValue()->getType())
4476                 RC = ConstantExpr::getBitCast(RC,
4477                                               RI->getReturnValue()->getType());
4478               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4479                                 << " in " << *RI << " :: " << *this << "\n");
4480               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4481                 Changed = ChangeStatus::CHANGED;
4482             }
4483             return true;
4484           };
4485       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4486     }
4487 
4488     return Changed | AAValueSimplify::manifest(A);
4489   }
4490 
4491   /// See AbstractAttribute::trackStatistics()
4492   void trackStatistics() const override {
4493     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4494   }
4495 };
4496 
4497 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4498   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4499       : AAValueSimplifyImpl(IRP, A) {}
4500 
4501   /// See AbstractAttribute::initialize(...).
4502   void initialize(Attributor &A) override {
4503     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4504     //        Needs investigation.
4505     // AAValueSimplifyImpl::initialize(A);
4506     Value &V = getAnchorValue();
4507 
4508     // TODO: add other stuffs
4509     if (isa<Constant>(V))
4510       indicatePessimisticFixpoint();
4511   }
4512 
4513   /// See AbstractAttribute::updateImpl(...).
4514   ChangeStatus updateImpl(Attributor &A) override {
4515     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4516 
4517     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4518                             bool Stripped) -> bool {
4519       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4520       if (!Stripped && this == &AA) {
4521         // TODO: Look the instruction and check recursively.
4522 
4523         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4524                           << "\n");
4525         return false;
4526       }
4527       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4528     };
4529 
4530     bool Dummy = false;
4531     if (!genericValueTraversal<AAValueSimplify, bool>(
4532             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI()))
4533       if (!askSimplifiedValueForAAValueConstantRange(A))
4534         return indicatePessimisticFixpoint();
4535 
4536     // If a candicate was found in this update, return CHANGED.
4537 
4538     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4539                ? ChangeStatus::UNCHANGED
4540                : ChangeStatus ::CHANGED;
4541   }
4542 
4543   /// See AbstractAttribute::trackStatistics()
4544   void trackStatistics() const override {
4545     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4546   }
4547 };
4548 
4549 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4550   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4551       : AAValueSimplifyImpl(IRP, A) {}
4552 
4553   /// See AbstractAttribute::initialize(...).
4554   void initialize(Attributor &A) override {
4555     SimplifiedAssociatedValue = &getAnchorValue();
4556     indicateOptimisticFixpoint();
4557   }
4558   /// See AbstractAttribute::initialize(...).
4559   ChangeStatus updateImpl(Attributor &A) override {
4560     llvm_unreachable(
4561         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4562   }
4563   /// See AbstractAttribute::trackStatistics()
4564   void trackStatistics() const override {
4565     STATS_DECLTRACK_FN_ATTR(value_simplify)
4566   }
4567 };
4568 
4569 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4570   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4571       : AAValueSimplifyFunction(IRP, A) {}
4572   /// See AbstractAttribute::trackStatistics()
4573   void trackStatistics() const override {
4574     STATS_DECLTRACK_CS_ATTR(value_simplify)
4575   }
4576 };
4577 
4578 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4579   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4580       : AAValueSimplifyReturned(IRP, A) {}
4581 
4582   /// See AbstractAttribute::manifest(...).
4583   ChangeStatus manifest(Attributor &A) override {
4584     return AAValueSimplifyImpl::manifest(A);
4585   }
4586 
4587   void trackStatistics() const override {
4588     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4589   }
4590 };
4591 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4592   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4593       : AAValueSimplifyFloating(IRP, A) {}
4594 
4595   void trackStatistics() const override {
4596     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4597   }
4598 };
4599 
4600 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4601 struct AAHeapToStackImpl : public AAHeapToStack {
4602   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4603       : AAHeapToStack(IRP, A) {}
4604 
4605   const std::string getAsStr() const override {
4606     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4607   }
4608 
4609   ChangeStatus manifest(Attributor &A) override {
4610     assert(getState().isValidState() &&
4611            "Attempted to manifest an invalid state!");
4612 
4613     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4614     Function *F = getAnchorScope();
4615     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4616 
4617     for (Instruction *MallocCall : MallocCalls) {
4618       // This malloc cannot be replaced.
4619       if (BadMallocCalls.count(MallocCall))
4620         continue;
4621 
4622       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4623         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4624         A.deleteAfterManifest(*FreeCall);
4625         HasChanged = ChangeStatus::CHANGED;
4626       }
4627 
4628       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4629                         << "\n");
4630 
4631       MaybeAlign Alignment;
4632       Constant *Size;
4633       if (isCallocLikeFn(MallocCall, TLI)) {
4634         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4635         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4636         APInt TotalSize = SizeT->getValue() * Num->getValue();
4637         Size =
4638             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4639       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4640         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4641         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4642                                    ->getValue()
4643                                    .getZExtValue());
4644       } else {
4645         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4646       }
4647 
4648       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4649       Instruction *AI =
4650           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4651                          "", MallocCall->getNextNode());
4652 
4653       if (AI->getType() != MallocCall->getType())
4654         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4655                              AI->getNextNode());
4656 
4657       A.changeValueAfterManifest(*MallocCall, *AI);
4658 
4659       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4660         auto *NBB = II->getNormalDest();
4661         BranchInst::Create(NBB, MallocCall->getParent());
4662         A.deleteAfterManifest(*MallocCall);
4663       } else {
4664         A.deleteAfterManifest(*MallocCall);
4665       }
4666 
4667       // Zero out the allocated memory if it was a calloc.
4668       if (isCallocLikeFn(MallocCall, TLI)) {
4669         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4670                                    AI->getNextNode());
4671         Value *Ops[] = {
4672             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4673             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4674 
4675         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4676         Module *M = F->getParent();
4677         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4678         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4679       }
4680       HasChanged = ChangeStatus::CHANGED;
4681     }
4682 
4683     return HasChanged;
4684   }
4685 
4686   /// Collection of all malloc calls in a function.
4687   SmallSetVector<Instruction *, 4> MallocCalls;
4688 
4689   /// Collection of malloc calls that cannot be converted.
4690   DenseSet<const Instruction *> BadMallocCalls;
4691 
4692   /// A map for each malloc call to the set of associated free calls.
4693   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4694 
4695   ChangeStatus updateImpl(Attributor &A) override;
4696 };
4697 
4698 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4699   const Function *F = getAnchorScope();
4700   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4701 
4702   MustBeExecutedContextExplorer &Explorer =
4703       A.getInfoCache().getMustBeExecutedContextExplorer();
4704 
4705   auto FreeCheck = [&](Instruction &I) {
4706     const auto &Frees = FreesForMalloc.lookup(&I);
4707     if (Frees.size() != 1)
4708       return false;
4709     Instruction *UniqueFree = *Frees.begin();
4710     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4711   };
4712 
4713   auto UsesCheck = [&](Instruction &I) {
4714     bool ValidUsesOnly = true;
4715     bool MustUse = true;
4716     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4717       Instruction *UserI = cast<Instruction>(U.getUser());
4718       if (isa<LoadInst>(UserI))
4719         return true;
4720       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4721         if (SI->getValueOperand() == U.get()) {
4722           LLVM_DEBUG(dbgs()
4723                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4724           ValidUsesOnly = false;
4725         } else {
4726           // A store into the malloc'ed memory is fine.
4727         }
4728         return true;
4729       }
4730       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4731         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4732           return true;
4733         // Record malloc.
4734         if (isFreeCall(UserI, TLI)) {
4735           if (MustUse) {
4736             FreesForMalloc[&I].insert(UserI);
4737           } else {
4738             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4739                               << *UserI << "\n");
4740             ValidUsesOnly = false;
4741           }
4742           return true;
4743         }
4744 
4745         unsigned ArgNo = CB->getArgOperandNo(&U);
4746 
4747         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4748             *this, IRPosition::callsite_argument(*CB, ArgNo));
4749 
4750         // If a callsite argument use is nofree, we are fine.
4751         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4752             *this, IRPosition::callsite_argument(*CB, ArgNo));
4753 
4754         if (!NoCaptureAA.isAssumedNoCapture() ||
4755             !ArgNoFreeAA.isAssumedNoFree()) {
4756           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4757           ValidUsesOnly = false;
4758         }
4759         return true;
4760       }
4761 
4762       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4763           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4764         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4765         Follow = true;
4766         return true;
4767       }
4768       // Unknown user for which we can not track uses further (in a way that
4769       // makes sense).
4770       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4771       ValidUsesOnly = false;
4772       return true;
4773     };
4774     A.checkForAllUses(Pred, *this, I);
4775     return ValidUsesOnly;
4776   };
4777 
4778   auto MallocCallocCheck = [&](Instruction &I) {
4779     if (BadMallocCalls.count(&I))
4780       return true;
4781 
4782     bool IsMalloc = isMallocLikeFn(&I, TLI);
4783     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
4784     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4785     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
4786       BadMallocCalls.insert(&I);
4787       return true;
4788     }
4789 
4790     if (IsMalloc) {
4791       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4792         if (Size->getValue().ule(MaxHeapToStackSize))
4793           if (UsesCheck(I) || FreeCheck(I)) {
4794             MallocCalls.insert(&I);
4795             return true;
4796           }
4797     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
4798       // Only if the alignment and sizes are constant.
4799       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4800         if (Size->getValue().ule(MaxHeapToStackSize))
4801           if (UsesCheck(I) || FreeCheck(I)) {
4802             MallocCalls.insert(&I);
4803             return true;
4804           }
4805     } else if (IsCalloc) {
4806       bool Overflow = false;
4807       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4808         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4809           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4810                   .ule(MaxHeapToStackSize))
4811             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4812               MallocCalls.insert(&I);
4813               return true;
4814             }
4815     }
4816 
4817     BadMallocCalls.insert(&I);
4818     return true;
4819   };
4820 
4821   size_t NumBadMallocs = BadMallocCalls.size();
4822 
4823   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4824 
4825   if (NumBadMallocs != BadMallocCalls.size())
4826     return ChangeStatus::CHANGED;
4827 
4828   return ChangeStatus::UNCHANGED;
4829 }
4830 
4831 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4832   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
4833       : AAHeapToStackImpl(IRP, A) {}
4834 
4835   /// See AbstractAttribute::trackStatistics().
4836   void trackStatistics() const override {
4837     STATS_DECL(
4838         MallocCalls, Function,
4839         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
4840     for (auto *C : MallocCalls)
4841       if (!BadMallocCalls.count(C))
4842         ++BUILD_STAT_NAME(MallocCalls, Function);
4843   }
4844 };
4845 
4846 /// ----------------------- Privatizable Pointers ------------------------------
4847 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
4848   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
4849       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
4850 
4851   ChangeStatus indicatePessimisticFixpoint() override {
4852     AAPrivatizablePtr::indicatePessimisticFixpoint();
4853     PrivatizableType = nullptr;
4854     return ChangeStatus::CHANGED;
4855   }
4856 
4857   /// Identify the type we can chose for a private copy of the underlying
4858   /// argument. None means it is not clear yet, nullptr means there is none.
4859   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
4860 
4861   /// Return a privatizable type that encloses both T0 and T1.
4862   /// TODO: This is merely a stub for now as we should manage a mapping as well.
4863   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
4864     if (!T0.hasValue())
4865       return T1;
4866     if (!T1.hasValue())
4867       return T0;
4868     if (T0 == T1)
4869       return T0;
4870     return nullptr;
4871   }
4872 
4873   Optional<Type *> getPrivatizableType() const override {
4874     return PrivatizableType;
4875   }
4876 
4877   const std::string getAsStr() const override {
4878     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
4879   }
4880 
4881 protected:
4882   Optional<Type *> PrivatizableType;
4883 };
4884 
4885 // TODO: Do this for call site arguments (probably also other values) as well.
4886 
4887 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
4888   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
4889       : AAPrivatizablePtrImpl(IRP, A) {}
4890 
4891   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
4892   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
4893     // If this is a byval argument and we know all the call sites (so we can
4894     // rewrite them), there is no need to check them explicitly.
4895     bool AllCallSitesKnown;
4896     if (getIRPosition().hasAttr(Attribute::ByVal) &&
4897         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
4898                                true, AllCallSitesKnown))
4899       return getAssociatedValue().getType()->getPointerElementType();
4900 
4901     Optional<Type *> Ty;
4902     unsigned ArgNo = getIRPosition().getArgNo();
4903 
4904     // Make sure the associated call site argument has the same type at all call
4905     // sites and it is an allocation we know is safe to privatize, for now that
4906     // means we only allow alloca instructions.
4907     // TODO: We can additionally analyze the accesses in the callee to  create
4908     //       the type from that information instead. That is a little more
4909     //       involved and will be done in a follow up patch.
4910     auto CallSiteCheck = [&](AbstractCallSite ACS) {
4911       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
4912       // Check if a coresponding argument was found or if it is one not
4913       // associated (which can happen for callback calls).
4914       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4915         return false;
4916 
4917       // Check that all call sites agree on a type.
4918       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
4919       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
4920 
4921       LLVM_DEBUG({
4922         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
4923         if (CSTy.hasValue() && CSTy.getValue())
4924           CSTy.getValue()->print(dbgs());
4925         else if (CSTy.hasValue())
4926           dbgs() << "<nullptr>";
4927         else
4928           dbgs() << "<none>";
4929       });
4930 
4931       Ty = combineTypes(Ty, CSTy);
4932 
4933       LLVM_DEBUG({
4934         dbgs() << " : New Type: ";
4935         if (Ty.hasValue() && Ty.getValue())
4936           Ty.getValue()->print(dbgs());
4937         else if (Ty.hasValue())
4938           dbgs() << "<nullptr>";
4939         else
4940           dbgs() << "<none>";
4941         dbgs() << "\n";
4942       });
4943 
4944       return !Ty.hasValue() || Ty.getValue();
4945     };
4946 
4947     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
4948       return nullptr;
4949     return Ty;
4950   }
4951 
4952   /// See AbstractAttribute::updateImpl(...).
4953   ChangeStatus updateImpl(Attributor &A) override {
4954     PrivatizableType = identifyPrivatizableType(A);
4955     if (!PrivatizableType.hasValue())
4956       return ChangeStatus::UNCHANGED;
4957     if (!PrivatizableType.getValue())
4958       return indicatePessimisticFixpoint();
4959 
4960     // Avoid arguments with padding for now.
4961     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
4962         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
4963                                                 A.getInfoCache().getDL())) {
4964       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
4965       return indicatePessimisticFixpoint();
4966     }
4967 
4968     // Verify callee and caller agree on how the promoted argument would be
4969     // passed.
4970     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
4971     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
4972     // which doesn't require the arguments ArgumentPromotion wanted to pass.
4973     Function &Fn = *getIRPosition().getAnchorScope();
4974     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
4975     ArgsToPromote.insert(getAssociatedArgument());
4976     const auto *TTI =
4977         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
4978     if (!TTI ||
4979         !ArgumentPromotionPass::areFunctionArgsABICompatible(
4980             Fn, *TTI, ArgsToPromote, Dummy) ||
4981         ArgsToPromote.empty()) {
4982       LLVM_DEBUG(
4983           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
4984                  << Fn.getName() << "\n");
4985       return indicatePessimisticFixpoint();
4986     }
4987 
4988     // Collect the types that will replace the privatizable type in the function
4989     // signature.
4990     SmallVector<Type *, 16> ReplacementTypes;
4991     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
4992 
4993     // Register a rewrite of the argument.
4994     Argument *Arg = getAssociatedArgument();
4995     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
4996       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
4997       return indicatePessimisticFixpoint();
4998     }
4999 
5000     unsigned ArgNo = Arg->getArgNo();
5001 
5002     // Helper to check if for the given call site the associated argument is
5003     // passed to a callback where the privatization would be different.
5004     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5005       SmallVector<const Use *, 4> CallbackUses;
5006       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5007       for (const Use *U : CallbackUses) {
5008         AbstractCallSite CBACS(U);
5009         assert(CBACS && CBACS.isCallbackCall());
5010         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5011           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5012 
5013           LLVM_DEBUG({
5014             dbgs()
5015                 << "[AAPrivatizablePtr] Argument " << *Arg
5016                 << "check if can be privatized in the context of its parent ("
5017                 << Arg->getParent()->getName()
5018                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5019                    "callback ("
5020                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5021                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5022                 << CBACS.getCallArgOperand(CBArg) << " vs "
5023                 << CB.getArgOperand(ArgNo) << "\n"
5024                 << "[AAPrivatizablePtr] " << CBArg << " : "
5025                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5026           });
5027 
5028           if (CBArgNo != int(ArgNo))
5029             continue;
5030           const auto &CBArgPrivAA =
5031               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5032           if (CBArgPrivAA.isValidState()) {
5033             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5034             if (!CBArgPrivTy.hasValue())
5035               continue;
5036             if (CBArgPrivTy.getValue() == PrivatizableType)
5037               continue;
5038           }
5039 
5040           LLVM_DEBUG({
5041             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5042                    << " cannot be privatized in the context of its parent ("
5043                    << Arg->getParent()->getName()
5044                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5045                       "callback ("
5046                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5047                    << ").\n[AAPrivatizablePtr] for which the argument "
5048                       "privatization is not compatible.\n";
5049           });
5050           return false;
5051         }
5052       }
5053       return true;
5054     };
5055 
5056     // Helper to check if for the given call site the associated argument is
5057     // passed to a direct call where the privatization would be different.
5058     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5059       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5060       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5061       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5062              "Expected a direct call operand for callback call operand");
5063 
5064       LLVM_DEBUG({
5065         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5066                << " check if be privatized in the context of its parent ("
5067                << Arg->getParent()->getName()
5068                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5069                   "direct call of ("
5070                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5071                << ").\n";
5072       });
5073 
5074       Function *DCCallee = DC->getCalledFunction();
5075       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5076         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5077             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5078         if (DCArgPrivAA.isValidState()) {
5079           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5080           if (!DCArgPrivTy.hasValue())
5081             return true;
5082           if (DCArgPrivTy.getValue() == PrivatizableType)
5083             return true;
5084         }
5085       }
5086 
5087       LLVM_DEBUG({
5088         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5089                << " cannot be privatized in the context of its parent ("
5090                << Arg->getParent()->getName()
5091                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5092                   "direct call of ("
5093                << ACS.getInstruction()->getCalledFunction()->getName()
5094                << ").\n[AAPrivatizablePtr] for which the argument "
5095                   "privatization is not compatible.\n";
5096       });
5097       return false;
5098     };
5099 
5100     // Helper to check if the associated argument is used at the given abstract
5101     // call site in a way that is incompatible with the privatization assumed
5102     // here.
5103     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5104       if (ACS.isDirectCall())
5105         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5106       if (ACS.isCallbackCall())
5107         return IsCompatiblePrivArgOfDirectCS(ACS);
5108       return false;
5109     };
5110 
5111     bool AllCallSitesKnown;
5112     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5113                                 AllCallSitesKnown))
5114       return indicatePessimisticFixpoint();
5115 
5116     return ChangeStatus::UNCHANGED;
5117   }
5118 
5119   /// Given a type to private \p PrivType, collect the constituates (which are
5120   /// used) in \p ReplacementTypes.
5121   static void
5122   identifyReplacementTypes(Type *PrivType,
5123                            SmallVectorImpl<Type *> &ReplacementTypes) {
5124     // TODO: For now we expand the privatization type to the fullest which can
5125     //       lead to dead arguments that need to be removed later.
5126     assert(PrivType && "Expected privatizable type!");
5127 
5128     // Traverse the type, extract constituate types on the outermost level.
5129     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5130       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5131         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5132     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5133       ReplacementTypes.append(PrivArrayType->getNumElements(),
5134                               PrivArrayType->getElementType());
5135     } else {
5136       ReplacementTypes.push_back(PrivType);
5137     }
5138   }
5139 
5140   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5141   /// The values needed are taken from the arguments of \p F starting at
5142   /// position \p ArgNo.
5143   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5144                                    unsigned ArgNo, Instruction &IP) {
5145     assert(PrivType && "Expected privatizable type!");
5146 
5147     IRBuilder<NoFolder> IRB(&IP);
5148     const DataLayout &DL = F.getParent()->getDataLayout();
5149 
5150     // Traverse the type, build GEPs and stores.
5151     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5152       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5153       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5154         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5155         Value *Ptr = constructPointer(
5156             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5157         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5158       }
5159     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5160       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5161       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5162       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5163         Value *Ptr =
5164             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5165         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5166       }
5167     } else {
5168       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5169     }
5170   }
5171 
5172   /// Extract values from \p Base according to the type \p PrivType at the
5173   /// call position \p ACS. The values are appended to \p ReplacementValues.
5174   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5175                                Value *Base,
5176                                SmallVectorImpl<Value *> &ReplacementValues) {
5177     assert(Base && "Expected base value!");
5178     assert(PrivType && "Expected privatizable type!");
5179     Instruction *IP = ACS.getInstruction();
5180 
5181     IRBuilder<NoFolder> IRB(IP);
5182     const DataLayout &DL = IP->getModule()->getDataLayout();
5183 
5184     if (Base->getType()->getPointerElementType() != PrivType)
5185       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5186                                                  "", ACS.getInstruction());
5187 
5188     // TODO: Improve the alignment of the loads.
5189     // Traverse the type, build GEPs and loads.
5190     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5191       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5192       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5193         Type *PointeeTy = PrivStructType->getElementType(u);
5194         Value *Ptr =
5195             constructPointer(PointeeTy->getPointerTo(), Base,
5196                              PrivStructLayout->getElementOffset(u), IRB, DL);
5197         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5198         L->setAlignment(Align(1));
5199         ReplacementValues.push_back(L);
5200       }
5201     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5202       Type *PointeeTy = PrivArrayType->getElementType();
5203       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5204       Type *PointeePtrTy = PointeeTy->getPointerTo();
5205       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5206         Value *Ptr =
5207             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5208         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5209         L->setAlignment(Align(1));
5210         ReplacementValues.push_back(L);
5211       }
5212     } else {
5213       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5214       L->setAlignment(Align(1));
5215       ReplacementValues.push_back(L);
5216     }
5217   }
5218 
5219   /// See AbstractAttribute::manifest(...)
5220   ChangeStatus manifest(Attributor &A) override {
5221     if (!PrivatizableType.hasValue())
5222       return ChangeStatus::UNCHANGED;
5223     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5224 
5225     // Collect all tail calls in the function as we cannot allow new allocas to
5226     // escape into tail recursion.
5227     // TODO: Be smarter about new allocas escaping into tail calls.
5228     SmallVector<CallInst *, 16> TailCalls;
5229     if (!A.checkForAllInstructions(
5230             [&](Instruction &I) {
5231               CallInst &CI = cast<CallInst>(I);
5232               if (CI.isTailCall())
5233                 TailCalls.push_back(&CI);
5234               return true;
5235             },
5236             *this, {Instruction::Call}))
5237       return ChangeStatus::UNCHANGED;
5238 
5239     Argument *Arg = getAssociatedArgument();
5240 
5241     // Callback to repair the associated function. A new alloca is placed at the
5242     // beginning and initialized with the values passed through arguments. The
5243     // new alloca replaces the use of the old pointer argument.
5244     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5245         [=](const Attributor::ArgumentReplacementInfo &ARI,
5246             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5247           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5248           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5249           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5250                                     Arg->getName() + ".priv", IP);
5251           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5252                                ArgIt->getArgNo(), *IP);
5253           Arg->replaceAllUsesWith(AI);
5254 
5255           for (CallInst *CI : TailCalls)
5256             CI->setTailCall(false);
5257         };
5258 
5259     // Callback to repair a call site of the associated function. The elements
5260     // of the privatizable type are loaded prior to the call and passed to the
5261     // new function version.
5262     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5263         [=](const Attributor::ArgumentReplacementInfo &ARI,
5264             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
5265           createReplacementValues(
5266               PrivatizableType.getValue(), ACS,
5267               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5268               NewArgOperands);
5269         };
5270 
5271     // Collect the types that will replace the privatizable type in the function
5272     // signature.
5273     SmallVector<Type *, 16> ReplacementTypes;
5274     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5275 
5276     // Register a rewrite of the argument.
5277     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5278                                            std::move(FnRepairCB),
5279                                            std::move(ACSRepairCB)))
5280       return ChangeStatus::CHANGED;
5281     return ChangeStatus::UNCHANGED;
5282   }
5283 
5284   /// See AbstractAttribute::trackStatistics()
5285   void trackStatistics() const override {
5286     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5287   }
5288 };
5289 
5290 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5291   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5292       : AAPrivatizablePtrImpl(IRP, A) {}
5293 
5294   /// See AbstractAttribute::initialize(...).
5295   virtual void initialize(Attributor &A) override {
5296     // TODO: We can privatize more than arguments.
5297     indicatePessimisticFixpoint();
5298   }
5299 
5300   ChangeStatus updateImpl(Attributor &A) override {
5301     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5302                      "updateImpl will not be called");
5303   }
5304 
5305   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5306   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5307     Value *Obj =
5308         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5309     if (!Obj) {
5310       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5311       return nullptr;
5312     }
5313 
5314     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5315       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5316         if (CI->isOne())
5317           return Obj->getType()->getPointerElementType();
5318     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5319       auto &PrivArgAA =
5320           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5321       if (PrivArgAA.isAssumedPrivatizablePtr())
5322         return Obj->getType()->getPointerElementType();
5323     }
5324 
5325     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5326                          "alloca nor privatizable argument: "
5327                       << *Obj << "!\n");
5328     return nullptr;
5329   }
5330 
5331   /// See AbstractAttribute::trackStatistics()
5332   void trackStatistics() const override {
5333     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5334   }
5335 };
5336 
5337 struct AAPrivatizablePtrCallSiteArgument final
5338     : public AAPrivatizablePtrFloating {
5339   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5340       : AAPrivatizablePtrFloating(IRP, A) {}
5341 
5342   /// See AbstractAttribute::initialize(...).
5343   void initialize(Attributor &A) override {
5344     if (getIRPosition().hasAttr(Attribute::ByVal))
5345       indicateOptimisticFixpoint();
5346   }
5347 
5348   /// See AbstractAttribute::updateImpl(...).
5349   ChangeStatus updateImpl(Attributor &A) override {
5350     PrivatizableType = identifyPrivatizableType(A);
5351     if (!PrivatizableType.hasValue())
5352       return ChangeStatus::UNCHANGED;
5353     if (!PrivatizableType.getValue())
5354       return indicatePessimisticFixpoint();
5355 
5356     const IRPosition &IRP = getIRPosition();
5357     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5358     if (!NoCaptureAA.isAssumedNoCapture()) {
5359       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5360       return indicatePessimisticFixpoint();
5361     }
5362 
5363     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5364     if (!NoAliasAA.isAssumedNoAlias()) {
5365       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5366       return indicatePessimisticFixpoint();
5367     }
5368 
5369     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5370     if (!MemBehaviorAA.isAssumedReadOnly()) {
5371       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5372       return indicatePessimisticFixpoint();
5373     }
5374 
5375     return ChangeStatus::UNCHANGED;
5376   }
5377 
5378   /// See AbstractAttribute::trackStatistics()
5379   void trackStatistics() const override {
5380     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5381   }
5382 };
5383 
5384 struct AAPrivatizablePtrCallSiteReturned final
5385     : public AAPrivatizablePtrFloating {
5386   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5387       : AAPrivatizablePtrFloating(IRP, A) {}
5388 
5389   /// See AbstractAttribute::initialize(...).
5390   void initialize(Attributor &A) override {
5391     // TODO: We can privatize more than arguments.
5392     indicatePessimisticFixpoint();
5393   }
5394 
5395   /// See AbstractAttribute::trackStatistics()
5396   void trackStatistics() const override {
5397     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5398   }
5399 };
5400 
5401 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5402   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5403       : AAPrivatizablePtrFloating(IRP, A) {}
5404 
5405   /// See AbstractAttribute::initialize(...).
5406   void initialize(Attributor &A) override {
5407     // TODO: We can privatize more than arguments.
5408     indicatePessimisticFixpoint();
5409   }
5410 
5411   /// See AbstractAttribute::trackStatistics()
5412   void trackStatistics() const override {
5413     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5414   }
5415 };
5416 
5417 /// -------------------- Memory Behavior Attributes ----------------------------
5418 /// Includes read-none, read-only, and write-only.
5419 /// ----------------------------------------------------------------------------
5420 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5421   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5422       : AAMemoryBehavior(IRP, A) {}
5423 
5424   /// See AbstractAttribute::initialize(...).
5425   void initialize(Attributor &A) override {
5426     intersectAssumedBits(BEST_STATE);
5427     getKnownStateFromValue(getIRPosition(), getState());
5428     IRAttribute::initialize(A);
5429   }
5430 
5431   /// Return the memory behavior information encoded in the IR for \p IRP.
5432   static void getKnownStateFromValue(const IRPosition &IRP,
5433                                      BitIntegerState &State,
5434                                      bool IgnoreSubsumingPositions = false) {
5435     SmallVector<Attribute, 2> Attrs;
5436     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5437     for (const Attribute &Attr : Attrs) {
5438       switch (Attr.getKindAsEnum()) {
5439       case Attribute::ReadNone:
5440         State.addKnownBits(NO_ACCESSES);
5441         break;
5442       case Attribute::ReadOnly:
5443         State.addKnownBits(NO_WRITES);
5444         break;
5445       case Attribute::WriteOnly:
5446         State.addKnownBits(NO_READS);
5447         break;
5448       default:
5449         llvm_unreachable("Unexpected attribute!");
5450       }
5451     }
5452 
5453     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5454       if (!I->mayReadFromMemory())
5455         State.addKnownBits(NO_READS);
5456       if (!I->mayWriteToMemory())
5457         State.addKnownBits(NO_WRITES);
5458     }
5459   }
5460 
5461   /// See AbstractAttribute::getDeducedAttributes(...).
5462   void getDeducedAttributes(LLVMContext &Ctx,
5463                             SmallVectorImpl<Attribute> &Attrs) const override {
5464     assert(Attrs.size() == 0);
5465     if (isAssumedReadNone())
5466       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5467     else if (isAssumedReadOnly())
5468       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5469     else if (isAssumedWriteOnly())
5470       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5471     assert(Attrs.size() <= 1);
5472   }
5473 
5474   /// See AbstractAttribute::manifest(...).
5475   ChangeStatus manifest(Attributor &A) override {
5476     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5477       return ChangeStatus::UNCHANGED;
5478 
5479     const IRPosition &IRP = getIRPosition();
5480 
5481     // Check if we would improve the existing attributes first.
5482     SmallVector<Attribute, 4> DeducedAttrs;
5483     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5484     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5485           return IRP.hasAttr(Attr.getKindAsEnum(),
5486                              /* IgnoreSubsumingPositions */ true);
5487         }))
5488       return ChangeStatus::UNCHANGED;
5489 
5490     // Clear existing attributes.
5491     IRP.removeAttrs(AttrKinds);
5492 
5493     // Use the generic manifest method.
5494     return IRAttribute::manifest(A);
5495   }
5496 
5497   /// See AbstractState::getAsStr().
5498   const std::string getAsStr() const override {
5499     if (isAssumedReadNone())
5500       return "readnone";
5501     if (isAssumedReadOnly())
5502       return "readonly";
5503     if (isAssumedWriteOnly())
5504       return "writeonly";
5505     return "may-read/write";
5506   }
5507 
5508   /// The set of IR attributes AAMemoryBehavior deals with.
5509   static const Attribute::AttrKind AttrKinds[3];
5510 };
5511 
5512 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5513     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5514 
5515 /// Memory behavior attribute for a floating value.
5516 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5517   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5518       : AAMemoryBehaviorImpl(IRP, A) {}
5519 
5520   /// See AbstractAttribute::initialize(...).
5521   void initialize(Attributor &A) override {
5522     AAMemoryBehaviorImpl::initialize(A);
5523     // Initialize the use vector with all direct uses of the associated value.
5524     for (const Use &U : getAssociatedValue().uses())
5525       Uses.insert(&U);
5526   }
5527 
5528   /// See AbstractAttribute::updateImpl(...).
5529   ChangeStatus updateImpl(Attributor &A) override;
5530 
5531   /// See AbstractAttribute::trackStatistics()
5532   void trackStatistics() const override {
5533     if (isAssumedReadNone())
5534       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5535     else if (isAssumedReadOnly())
5536       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5537     else if (isAssumedWriteOnly())
5538       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5539   }
5540 
5541 private:
5542   /// Return true if users of \p UserI might access the underlying
5543   /// variable/location described by \p U and should therefore be analyzed.
5544   bool followUsersOfUseIn(Attributor &A, const Use *U,
5545                           const Instruction *UserI);
5546 
5547   /// Update the state according to the effect of use \p U in \p UserI.
5548   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5549 
5550 protected:
5551   /// Container for (transitive) uses of the associated argument.
5552   SetVector<const Use *> Uses;
5553 };
5554 
5555 /// Memory behavior attribute for function argument.
5556 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5557   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5558       : AAMemoryBehaviorFloating(IRP, A) {}
5559 
5560   /// See AbstractAttribute::initialize(...).
5561   void initialize(Attributor &A) override {
5562     intersectAssumedBits(BEST_STATE);
5563     const IRPosition &IRP = getIRPosition();
5564     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5565     // can query it when we use has/getAttr. That would allow us to reuse the
5566     // initialize of the base class here.
5567     bool HasByVal =
5568         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5569     getKnownStateFromValue(IRP, getState(),
5570                            /* IgnoreSubsumingPositions */ HasByVal);
5571 
5572     // Initialize the use vector with all direct uses of the associated value.
5573     Argument *Arg = getAssociatedArgument();
5574     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5575       indicatePessimisticFixpoint();
5576     } else {
5577       // Initialize the use vector with all direct uses of the associated value.
5578       for (const Use &U : Arg->uses())
5579         Uses.insert(&U);
5580     }
5581   }
5582 
5583   ChangeStatus manifest(Attributor &A) override {
5584     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5585     if (!getAssociatedValue().getType()->isPointerTy())
5586       return ChangeStatus::UNCHANGED;
5587 
5588     // TODO: From readattrs.ll: "inalloca parameters are always
5589     //                           considered written"
5590     if (hasAttr({Attribute::InAlloca})) {
5591       removeKnownBits(NO_WRITES);
5592       removeAssumedBits(NO_WRITES);
5593     }
5594     return AAMemoryBehaviorFloating::manifest(A);
5595   }
5596 
5597   /// See AbstractAttribute::trackStatistics()
5598   void trackStatistics() const override {
5599     if (isAssumedReadNone())
5600       STATS_DECLTRACK_ARG_ATTR(readnone)
5601     else if (isAssumedReadOnly())
5602       STATS_DECLTRACK_ARG_ATTR(readonly)
5603     else if (isAssumedWriteOnly())
5604       STATS_DECLTRACK_ARG_ATTR(writeonly)
5605   }
5606 };
5607 
5608 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5609   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5610       : AAMemoryBehaviorArgument(IRP, A) {}
5611 
5612   /// See AbstractAttribute::initialize(...).
5613   void initialize(Attributor &A) override {
5614     if (Argument *Arg = getAssociatedArgument()) {
5615       if (Arg->hasByValAttr()) {
5616         addKnownBits(NO_WRITES);
5617         removeKnownBits(NO_READS);
5618         removeAssumedBits(NO_READS);
5619       }
5620     }
5621     AAMemoryBehaviorArgument::initialize(A);
5622   }
5623 
5624   /// See AbstractAttribute::updateImpl(...).
5625   ChangeStatus updateImpl(Attributor &A) override {
5626     // TODO: Once we have call site specific value information we can provide
5627     //       call site specific liveness liveness information and then it makes
5628     //       sense to specialize attributes for call sites arguments instead of
5629     //       redirecting requests to the callee argument.
5630     Argument *Arg = getAssociatedArgument();
5631     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5632     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5633     return clampStateAndIndicateChange(
5634         getState(),
5635         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5636   }
5637 
5638   /// See AbstractAttribute::trackStatistics()
5639   void trackStatistics() const override {
5640     if (isAssumedReadNone())
5641       STATS_DECLTRACK_CSARG_ATTR(readnone)
5642     else if (isAssumedReadOnly())
5643       STATS_DECLTRACK_CSARG_ATTR(readonly)
5644     else if (isAssumedWriteOnly())
5645       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5646   }
5647 };
5648 
5649 /// Memory behavior attribute for a call site return position.
5650 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5651   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
5652       : AAMemoryBehaviorFloating(IRP, A) {}
5653 
5654   /// See AbstractAttribute::manifest(...).
5655   ChangeStatus manifest(Attributor &A) override {
5656     // We do not annotate returned values.
5657     return ChangeStatus::UNCHANGED;
5658   }
5659 
5660   /// See AbstractAttribute::trackStatistics()
5661   void trackStatistics() const override {}
5662 };
5663 
5664 /// An AA to represent the memory behavior function attributes.
5665 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5666   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
5667       : AAMemoryBehaviorImpl(IRP, A) {}
5668 
5669   /// See AbstractAttribute::updateImpl(Attributor &A).
5670   virtual ChangeStatus updateImpl(Attributor &A) override;
5671 
5672   /// See AbstractAttribute::manifest(...).
5673   ChangeStatus manifest(Attributor &A) override {
5674     Function &F = cast<Function>(getAnchorValue());
5675     if (isAssumedReadNone()) {
5676       F.removeFnAttr(Attribute::ArgMemOnly);
5677       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5678       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5679     }
5680     return AAMemoryBehaviorImpl::manifest(A);
5681   }
5682 
5683   /// See AbstractAttribute::trackStatistics()
5684   void trackStatistics() const override {
5685     if (isAssumedReadNone())
5686       STATS_DECLTRACK_FN_ATTR(readnone)
5687     else if (isAssumedReadOnly())
5688       STATS_DECLTRACK_FN_ATTR(readonly)
5689     else if (isAssumedWriteOnly())
5690       STATS_DECLTRACK_FN_ATTR(writeonly)
5691   }
5692 };
5693 
5694 /// AAMemoryBehavior attribute for call sites.
5695 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5696   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
5697       : AAMemoryBehaviorImpl(IRP, A) {}
5698 
5699   /// See AbstractAttribute::initialize(...).
5700   void initialize(Attributor &A) override {
5701     AAMemoryBehaviorImpl::initialize(A);
5702     Function *F = getAssociatedFunction();
5703     if (!F || !A.isFunctionIPOAmendable(*F)) {
5704       indicatePessimisticFixpoint();
5705       return;
5706     }
5707   }
5708 
5709   /// See AbstractAttribute::updateImpl(...).
5710   ChangeStatus updateImpl(Attributor &A) override {
5711     // TODO: Once we have call site specific value information we can provide
5712     //       call site specific liveness liveness information and then it makes
5713     //       sense to specialize attributes for call sites arguments instead of
5714     //       redirecting requests to the callee argument.
5715     Function *F = getAssociatedFunction();
5716     const IRPosition &FnPos = IRPosition::function(*F);
5717     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5718     return clampStateAndIndicateChange(
5719         getState(),
5720         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5721   }
5722 
5723   /// See AbstractAttribute::trackStatistics()
5724   void trackStatistics() const override {
5725     if (isAssumedReadNone())
5726       STATS_DECLTRACK_CS_ATTR(readnone)
5727     else if (isAssumedReadOnly())
5728       STATS_DECLTRACK_CS_ATTR(readonly)
5729     else if (isAssumedWriteOnly())
5730       STATS_DECLTRACK_CS_ATTR(writeonly)
5731   }
5732 };
5733 
5734 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5735 
5736   // The current assumed state used to determine a change.
5737   auto AssumedState = getAssumed();
5738 
5739   auto CheckRWInst = [&](Instruction &I) {
5740     // If the instruction has an own memory behavior state, use it to restrict
5741     // the local state. No further analysis is required as the other memory
5742     // state is as optimistic as it gets.
5743     if (const auto *CB = dyn_cast<CallBase>(&I)) {
5744       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5745           *this, IRPosition::callsite_function(*CB));
5746       intersectAssumedBits(MemBehaviorAA.getAssumed());
5747       return !isAtFixpoint();
5748     }
5749 
5750     // Remove access kind modifiers if necessary.
5751     if (I.mayReadFromMemory())
5752       removeAssumedBits(NO_READS);
5753     if (I.mayWriteToMemory())
5754       removeAssumedBits(NO_WRITES);
5755     return !isAtFixpoint();
5756   };
5757 
5758   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5759     return indicatePessimisticFixpoint();
5760 
5761   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5762                                         : ChangeStatus::UNCHANGED;
5763 }
5764 
5765 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5766 
5767   const IRPosition &IRP = getIRPosition();
5768   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5769   AAMemoryBehavior::StateType &S = getState();
5770 
5771   // First, check the function scope. We take the known information and we avoid
5772   // work if the assumed information implies the current assumed information for
5773   // this attribute. This is a valid for all but byval arguments.
5774   Argument *Arg = IRP.getAssociatedArgument();
5775   AAMemoryBehavior::base_t FnMemAssumedState =
5776       AAMemoryBehavior::StateType::getWorstState();
5777   if (!Arg || !Arg->hasByValAttr()) {
5778     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5779         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5780     FnMemAssumedState = FnMemAA.getAssumed();
5781     S.addKnownBits(FnMemAA.getKnown());
5782     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5783       return ChangeStatus::UNCHANGED;
5784   }
5785 
5786   // Make sure the value is not captured (except through "return"), if
5787   // it is, any information derived would be irrelevant anyway as we cannot
5788   // check the potential aliases introduced by the capture. However, no need
5789   // to fall back to anythign less optimistic than the function state.
5790   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5791       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5792   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5793     S.intersectAssumedBits(FnMemAssumedState);
5794     return ChangeStatus::CHANGED;
5795   }
5796 
5797   // The current assumed state used to determine a change.
5798   auto AssumedState = S.getAssumed();
5799 
5800   // Liveness information to exclude dead users.
5801   // TODO: Take the FnPos once we have call site specific liveness information.
5802   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5803       *this, IRPosition::function(*IRP.getAssociatedFunction()),
5804       /* TrackDependence */ false);
5805 
5806   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5807   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5808     const Use *U = Uses[i];
5809     Instruction *UserI = cast<Instruction>(U->getUser());
5810     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5811                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
5812                       << "]\n");
5813     if (A.isAssumedDead(*U, this, &LivenessAA))
5814       continue;
5815 
5816     // Droppable users, e.g., llvm::assume does not actually perform any action.
5817     if (UserI->isDroppable())
5818       continue;
5819 
5820     // Check if the users of UserI should also be visited.
5821     if (followUsersOfUseIn(A, U, UserI))
5822       for (const Use &UserIUse : UserI->uses())
5823         Uses.insert(&UserIUse);
5824 
5825     // If UserI might touch memory we analyze the use in detail.
5826     if (UserI->mayReadOrWriteMemory())
5827       analyzeUseIn(A, U, UserI);
5828   }
5829 
5830   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5831                                         : ChangeStatus::UNCHANGED;
5832 }
5833 
5834 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5835                                                   const Instruction *UserI) {
5836   // The loaded value is unrelated to the pointer argument, no need to
5837   // follow the users of the load.
5838   if (isa<LoadInst>(UserI))
5839     return false;
5840 
5841   // By default we follow all uses assuming UserI might leak information on U,
5842   // we have special handling for call sites operands though.
5843   const auto *CB = dyn_cast<CallBase>(UserI);
5844   if (!CB || !CB->isArgOperand(U))
5845     return true;
5846 
5847   // If the use is a call argument known not to be captured, the users of
5848   // the call do not need to be visited because they have to be unrelated to
5849   // the input. Note that this check is not trivial even though we disallow
5850   // general capturing of the underlying argument. The reason is that the
5851   // call might the argument "through return", which we allow and for which we
5852   // need to check call users.
5853   if (U->get()->getType()->isPointerTy()) {
5854     unsigned ArgNo = CB->getArgOperandNo(U);
5855     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5856         *this, IRPosition::callsite_argument(*CB, ArgNo),
5857         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5858     return !ArgNoCaptureAA.isAssumedNoCapture();
5859   }
5860 
5861   return true;
5862 }
5863 
5864 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5865                                             const Instruction *UserI) {
5866   assert(UserI->mayReadOrWriteMemory());
5867 
5868   switch (UserI->getOpcode()) {
5869   default:
5870     // TODO: Handle all atomics and other side-effect operations we know of.
5871     break;
5872   case Instruction::Load:
5873     // Loads cause the NO_READS property to disappear.
5874     removeAssumedBits(NO_READS);
5875     return;
5876 
5877   case Instruction::Store:
5878     // Stores cause the NO_WRITES property to disappear if the use is the
5879     // pointer operand. Note that we do assume that capturing was taken care of
5880     // somewhere else.
5881     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5882       removeAssumedBits(NO_WRITES);
5883     return;
5884 
5885   case Instruction::Call:
5886   case Instruction::CallBr:
5887   case Instruction::Invoke: {
5888     // For call sites we look at the argument memory behavior attribute (this
5889     // could be recursive!) in order to restrict our own state.
5890     const auto *CB = cast<CallBase>(UserI);
5891 
5892     // Give up on operand bundles.
5893     if (CB->isBundleOperand(U)) {
5894       indicatePessimisticFixpoint();
5895       return;
5896     }
5897 
5898     // Calling a function does read the function pointer, maybe write it if the
5899     // function is self-modifying.
5900     if (CB->isCallee(U)) {
5901       removeAssumedBits(NO_READS);
5902       break;
5903     }
5904 
5905     // Adjust the possible access behavior based on the information on the
5906     // argument.
5907     IRPosition Pos;
5908     if (U->get()->getType()->isPointerTy())
5909       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
5910     else
5911       Pos = IRPosition::callsite_function(*CB);
5912     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5913         *this, Pos,
5914         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5915     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5916     // and at least "known".
5917     intersectAssumedBits(MemBehaviorAA.getAssumed());
5918     return;
5919   }
5920   };
5921 
5922   // Generally, look at the "may-properties" and adjust the assumed state if we
5923   // did not trigger special handling before.
5924   if (UserI->mayReadFromMemory())
5925     removeAssumedBits(NO_READS);
5926   if (UserI->mayWriteToMemory())
5927     removeAssumedBits(NO_WRITES);
5928 }
5929 
5930 } // namespace
5931 
5932 /// -------------------- Memory Locations Attributes ---------------------------
5933 /// Includes read-none, argmemonly, inaccessiblememonly,
5934 /// inaccessiblememorargmemonly
5935 /// ----------------------------------------------------------------------------
5936 
5937 std::string AAMemoryLocation::getMemoryLocationsAsStr(
5938     AAMemoryLocation::MemoryLocationsKind MLK) {
5939   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
5940     return "all memory";
5941   if (MLK == AAMemoryLocation::NO_LOCATIONS)
5942     return "no memory";
5943   std::string S = "memory:";
5944   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
5945     S += "stack,";
5946   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
5947     S += "constant,";
5948   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
5949     S += "internal global,";
5950   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
5951     S += "external global,";
5952   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
5953     S += "argument,";
5954   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
5955     S += "inaccessible,";
5956   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
5957     S += "malloced,";
5958   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
5959     S += "unknown,";
5960   S.pop_back();
5961   return S;
5962 }
5963 
5964 namespace {
5965 struct AAMemoryLocationImpl : public AAMemoryLocation {
5966 
5967   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
5968       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
5969     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
5970       AccessKind2Accesses[u] = nullptr;
5971   }
5972 
5973   ~AAMemoryLocationImpl() {
5974     // The AccessSets are allocated via a BumpPtrAllocator, we call
5975     // the destructor manually.
5976     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
5977       if (AccessKind2Accesses[u])
5978         AccessKind2Accesses[u]->~AccessSet();
5979   }
5980 
5981   /// See AbstractAttribute::initialize(...).
5982   void initialize(Attributor &A) override {
5983     intersectAssumedBits(BEST_STATE);
5984     getKnownStateFromValue(getIRPosition(), getState());
5985     IRAttribute::initialize(A);
5986   }
5987 
5988   /// Return the memory behavior information encoded in the IR for \p IRP.
5989   static void getKnownStateFromValue(const IRPosition &IRP,
5990                                      BitIntegerState &State,
5991                                      bool IgnoreSubsumingPositions = false) {
5992     SmallVector<Attribute, 2> Attrs;
5993     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5994     for (const Attribute &Attr : Attrs) {
5995       switch (Attr.getKindAsEnum()) {
5996       case Attribute::ReadNone:
5997         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
5998         break;
5999       case Attribute::InaccessibleMemOnly:
6000         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6001         break;
6002       case Attribute::ArgMemOnly:
6003         State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6004         break;
6005       case Attribute::InaccessibleMemOrArgMemOnly:
6006         State.addKnownBits(
6007             inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6008         break;
6009       default:
6010         llvm_unreachable("Unexpected attribute!");
6011       }
6012     }
6013   }
6014 
6015   /// See AbstractAttribute::getDeducedAttributes(...).
6016   void getDeducedAttributes(LLVMContext &Ctx,
6017                             SmallVectorImpl<Attribute> &Attrs) const override {
6018     assert(Attrs.size() == 0);
6019     if (isAssumedReadNone()) {
6020       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6021     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6022       if (isAssumedInaccessibleMemOnly())
6023         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6024       else if (isAssumedArgMemOnly())
6025         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6026       else if (isAssumedInaccessibleOrArgMemOnly())
6027         Attrs.push_back(
6028             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6029     }
6030     assert(Attrs.size() <= 1);
6031   }
6032 
6033   /// See AbstractAttribute::manifest(...).
6034   ChangeStatus manifest(Attributor &A) override {
6035     const IRPosition &IRP = getIRPosition();
6036 
6037     // Check if we would improve the existing attributes first.
6038     SmallVector<Attribute, 4> DeducedAttrs;
6039     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6040     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6041           return IRP.hasAttr(Attr.getKindAsEnum(),
6042                              /* IgnoreSubsumingPositions */ true);
6043         }))
6044       return ChangeStatus::UNCHANGED;
6045 
6046     // Clear existing attributes.
6047     IRP.removeAttrs(AttrKinds);
6048     if (isAssumedReadNone())
6049       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6050 
6051     // Use the generic manifest method.
6052     return IRAttribute::manifest(A);
6053   }
6054 
6055   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6056   bool checkForAllAccessesToMemoryKind(
6057       function_ref<bool(const Instruction *, const Value *, AccessKind,
6058                         MemoryLocationsKind)>
6059           Pred,
6060       MemoryLocationsKind RequestedMLK) const override {
6061     if (!isValidState())
6062       return false;
6063 
6064     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6065     if (AssumedMLK == NO_LOCATIONS)
6066       return true;
6067 
6068     unsigned Idx = 0;
6069     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6070          CurMLK *= 2, ++Idx) {
6071       if (CurMLK & RequestedMLK)
6072         continue;
6073 
6074       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6075         for (const AccessInfo &AI : *Accesses)
6076           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6077             return false;
6078     }
6079 
6080     return true;
6081   }
6082 
6083   ChangeStatus indicatePessimisticFixpoint() override {
6084     // If we give up and indicate a pessimistic fixpoint this instruction will
6085     // become an access for all potential access kinds:
6086     // TODO: Add pointers for argmemonly and globals to improve the results of
6087     //       checkForAllAccessesToMemoryKind.
6088     bool Changed = false;
6089     MemoryLocationsKind KnownMLK = getKnown();
6090     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6091     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6092       if (!(CurMLK & KnownMLK))
6093         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6094                                   getAccessKindFromInst(I));
6095     return AAMemoryLocation::indicatePessimisticFixpoint();
6096   }
6097 
6098 protected:
6099   /// Helper struct to tie together an instruction that has a read or write
6100   /// effect with the pointer it accesses (if any).
6101   struct AccessInfo {
6102 
6103     /// The instruction that caused the access.
6104     const Instruction *I;
6105 
6106     /// The base pointer that is accessed, or null if unknown.
6107     const Value *Ptr;
6108 
6109     /// The kind of access (read/write/read+write).
6110     AccessKind Kind;
6111 
6112     bool operator==(const AccessInfo &RHS) const {
6113       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6114     }
6115     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6116       if (LHS.I != RHS.I)
6117         return LHS.I < RHS.I;
6118       if (LHS.Ptr != RHS.Ptr)
6119         return LHS.Ptr < RHS.Ptr;
6120       if (LHS.Kind != RHS.Kind)
6121         return LHS.Kind < RHS.Kind;
6122       return false;
6123     }
6124   };
6125 
6126   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6127   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6128   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6129   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6130 
6131   /// Return the kind(s) of location that may be accessed by \p V.
6132   AAMemoryLocation::MemoryLocationsKind
6133   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6134 
6135   /// Return the access kind as determined by \p I.
6136   AccessKind getAccessKindFromInst(const Instruction *I) {
6137     AccessKind AK = READ_WRITE;
6138     if (I) {
6139       AK = I->mayReadFromMemory() ? READ : NONE;
6140       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6141     }
6142     return AK;
6143   }
6144 
6145   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6146   /// an access of kind \p AK to a \p MLK memory location with the access
6147   /// pointer \p Ptr.
6148   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6149                                  MemoryLocationsKind MLK, const Instruction *I,
6150                                  const Value *Ptr, bool &Changed,
6151                                  AccessKind AK = READ_WRITE) {
6152 
6153     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6154     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6155     if (!Accesses)
6156       Accesses = new (Allocator) AccessSet();
6157     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6158     State.removeAssumedBits(MLK);
6159   }
6160 
6161   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6162   /// arguments, and update the state and access map accordingly.
6163   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6164                           AAMemoryLocation::StateType &State, bool &Changed);
6165 
6166   /// Used to allocate access sets.
6167   BumpPtrAllocator &Allocator;
6168 
6169   /// The set of IR attributes AAMemoryLocation deals with.
6170   static const Attribute::AttrKind AttrKinds[4];
6171 };
6172 
6173 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6174     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6175     Attribute::InaccessibleMemOrArgMemOnly};
6176 
6177 void AAMemoryLocationImpl::categorizePtrValue(
6178     Attributor &A, const Instruction &I, const Value &Ptr,
6179     AAMemoryLocation::StateType &State, bool &Changed) {
6180   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6181                     << Ptr << " ["
6182                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6183 
6184   auto StripGEPCB = [](Value *V) -> Value * {
6185     auto *GEP = dyn_cast<GEPOperator>(V);
6186     while (GEP) {
6187       V = GEP->getPointerOperand();
6188       GEP = dyn_cast<GEPOperator>(V);
6189     }
6190     return V;
6191   };
6192 
6193   auto VisitValueCB = [&](Value &V, const Instruction *,
6194                           AAMemoryLocation::StateType &T,
6195                           bool Stripped) -> bool {
6196     MemoryLocationsKind MLK = NO_LOCATIONS;
6197     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6198     if (isa<UndefValue>(V))
6199       return true;
6200     if (auto *Arg = dyn_cast<Argument>(&V)) {
6201       if (Arg->hasByValAttr())
6202         MLK = NO_LOCAL_MEM;
6203       else
6204         MLK = NO_ARGUMENT_MEM;
6205     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6206       if (GV->hasLocalLinkage())
6207         MLK = NO_GLOBAL_INTERNAL_MEM;
6208       else
6209         MLK = NO_GLOBAL_EXTERNAL_MEM;
6210     } else if (isa<AllocaInst>(V))
6211       MLK = NO_LOCAL_MEM;
6212     else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6213       const auto &NoAliasAA =
6214           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6215       if (NoAliasAA.isAssumedNoAlias())
6216         MLK = NO_MALLOCED_MEM;
6217       else
6218         MLK = NO_UNKOWN_MEM;
6219     } else {
6220       MLK = NO_UNKOWN_MEM;
6221     }
6222 
6223     assert(MLK != NO_LOCATIONS && "No location specified!");
6224     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6225                               getAccessKindFromInst(&I));
6226     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6227                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6228                       << "\n");
6229     return true;
6230   };
6231 
6232   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6233           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6234           /* MaxValues */ 32, StripGEPCB)) {
6235     LLVM_DEBUG(
6236         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6237     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6238                               getAccessKindFromInst(&I));
6239   } else {
6240     LLVM_DEBUG(
6241         dbgs()
6242         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6243         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6244   }
6245 }
6246 
6247 AAMemoryLocation::MemoryLocationsKind
6248 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6249                                                   bool &Changed) {
6250   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6251                     << I << "\n");
6252 
6253   AAMemoryLocation::StateType AccessedLocs;
6254   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6255 
6256   if (auto *CB = dyn_cast<CallBase>(&I)) {
6257 
6258     // First check if we assume any memory is access is visible.
6259     const auto &CBMemLocationAA =
6260         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6261     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6262                       << " [" << CBMemLocationAA << "]\n");
6263 
6264     if (CBMemLocationAA.isAssumedReadNone())
6265       return NO_LOCATIONS;
6266 
6267     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6268       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6269                                 Changed, getAccessKindFromInst(&I));
6270       return AccessedLocs.getAssumed();
6271     }
6272 
6273     uint32_t CBAssumedNotAccessedLocs =
6274         CBMemLocationAA.getAssumedNotAccessedLocation();
6275 
6276     // Set the argmemonly and global bit as we handle them separately below.
6277     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6278         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6279 
6280     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6281       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6282         continue;
6283       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6284                                 getAccessKindFromInst(&I));
6285     }
6286 
6287     // Now handle global memory if it might be accessed. This is slightly tricky
6288     // as NO_GLOBAL_MEM has multiple bits set.
6289     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6290     if (HasGlobalAccesses) {
6291       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6292                             AccessKind Kind, MemoryLocationsKind MLK) {
6293         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6294                                   getAccessKindFromInst(&I));
6295         return true;
6296       };
6297       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6298               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6299         return AccessedLocs.getWorstState();
6300     }
6301 
6302     LLVM_DEBUG(
6303         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6304                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6305 
6306     // Now handle argument memory if it might be accessed.
6307     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6308     if (HasArgAccesses) {
6309       for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E;
6310            ++ArgNo) {
6311 
6312         // Skip non-pointer arguments.
6313         const Value *ArgOp = CB->getArgOperand(ArgNo);
6314         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6315           continue;
6316 
6317         // Skip readnone arguments.
6318         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
6319         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6320             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6321 
6322         if (ArgOpMemLocationAA.isAssumedReadNone())
6323           continue;
6324 
6325         // Categorize potentially accessed pointer arguments as if there was an
6326         // access instruction with them as pointer.
6327         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6328       }
6329     }
6330 
6331     LLVM_DEBUG(
6332         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6333                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6334 
6335     return AccessedLocs.getAssumed();
6336   }
6337 
6338   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6339     LLVM_DEBUG(
6340         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6341                << I << " [" << *Ptr << "]\n");
6342     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6343     return AccessedLocs.getAssumed();
6344   }
6345 
6346   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6347                     << I << "\n");
6348   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6349                             getAccessKindFromInst(&I));
6350   return AccessedLocs.getAssumed();
6351 }
6352 
6353 /// An AA to represent the memory behavior function attributes.
6354 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6355   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6356       : AAMemoryLocationImpl(IRP, A) {}
6357 
6358   /// See AbstractAttribute::updateImpl(Attributor &A).
6359   virtual ChangeStatus updateImpl(Attributor &A) override {
6360 
6361     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6362         *this, getIRPosition(), /* TrackDependence */ false);
6363     if (MemBehaviorAA.isAssumedReadNone()) {
6364       if (MemBehaviorAA.isKnownReadNone())
6365         return indicateOptimisticFixpoint();
6366       assert(isAssumedReadNone() &&
6367              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6368       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6369       return ChangeStatus::UNCHANGED;
6370     }
6371 
6372     // The current assumed state used to determine a change.
6373     auto AssumedState = getAssumed();
6374     bool Changed = false;
6375 
6376     auto CheckRWInst = [&](Instruction &I) {
6377       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6378       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6379                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6380       removeAssumedBits(inverseLocation(MLK, false, false));
6381       return true;
6382     };
6383 
6384     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6385       return indicatePessimisticFixpoint();
6386 
6387     Changed |= AssumedState != getAssumed();
6388     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6389   }
6390 
6391   /// See AbstractAttribute::trackStatistics()
6392   void trackStatistics() const override {
6393     if (isAssumedReadNone())
6394       STATS_DECLTRACK_FN_ATTR(readnone)
6395     else if (isAssumedArgMemOnly())
6396       STATS_DECLTRACK_FN_ATTR(argmemonly)
6397     else if (isAssumedInaccessibleMemOnly())
6398       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6399     else if (isAssumedInaccessibleOrArgMemOnly())
6400       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6401   }
6402 };
6403 
6404 /// AAMemoryLocation attribute for call sites.
6405 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6406   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6407       : AAMemoryLocationImpl(IRP, A) {}
6408 
6409   /// See AbstractAttribute::initialize(...).
6410   void initialize(Attributor &A) override {
6411     AAMemoryLocationImpl::initialize(A);
6412     Function *F = getAssociatedFunction();
6413     if (!F || !A.isFunctionIPOAmendable(*F)) {
6414       indicatePessimisticFixpoint();
6415       return;
6416     }
6417   }
6418 
6419   /// See AbstractAttribute::updateImpl(...).
6420   ChangeStatus updateImpl(Attributor &A) override {
6421     // TODO: Once we have call site specific value information we can provide
6422     //       call site specific liveness liveness information and then it makes
6423     //       sense to specialize attributes for call sites arguments instead of
6424     //       redirecting requests to the callee argument.
6425     Function *F = getAssociatedFunction();
6426     const IRPosition &FnPos = IRPosition::function(*F);
6427     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6428     bool Changed = false;
6429     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6430                           AccessKind Kind, MemoryLocationsKind MLK) {
6431       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6432                                 getAccessKindFromInst(I));
6433       return true;
6434     };
6435     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6436       return indicatePessimisticFixpoint();
6437     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6438   }
6439 
6440   /// See AbstractAttribute::trackStatistics()
6441   void trackStatistics() const override {
6442     if (isAssumedReadNone())
6443       STATS_DECLTRACK_CS_ATTR(readnone)
6444   }
6445 };
6446 
6447 /// ------------------ Value Constant Range Attribute -------------------------
6448 
6449 struct AAValueConstantRangeImpl : AAValueConstantRange {
6450   using StateType = IntegerRangeState;
6451   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6452       : AAValueConstantRange(IRP, A) {}
6453 
6454   /// See AbstractAttribute::getAsStr().
6455   const std::string getAsStr() const override {
6456     std::string Str;
6457     llvm::raw_string_ostream OS(Str);
6458     OS << "range(" << getBitWidth() << ")<";
6459     getKnown().print(OS);
6460     OS << " / ";
6461     getAssumed().print(OS);
6462     OS << ">";
6463     return OS.str();
6464   }
6465 
6466   /// Helper function to get a SCEV expr for the associated value at program
6467   /// point \p I.
6468   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6469     if (!getAnchorScope())
6470       return nullptr;
6471 
6472     ScalarEvolution *SE =
6473         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6474             *getAnchorScope());
6475 
6476     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6477         *getAnchorScope());
6478 
6479     if (!SE || !LI)
6480       return nullptr;
6481 
6482     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6483     if (!I)
6484       return S;
6485 
6486     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6487   }
6488 
6489   /// Helper function to get a range from SCEV for the associated value at
6490   /// program point \p I.
6491   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6492                                          const Instruction *I = nullptr) const {
6493     if (!getAnchorScope())
6494       return getWorstState(getBitWidth());
6495 
6496     ScalarEvolution *SE =
6497         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6498             *getAnchorScope());
6499 
6500     const SCEV *S = getSCEV(A, I);
6501     if (!SE || !S)
6502       return getWorstState(getBitWidth());
6503 
6504     return SE->getUnsignedRange(S);
6505   }
6506 
6507   /// Helper function to get a range from LVI for the associated value at
6508   /// program point \p I.
6509   ConstantRange
6510   getConstantRangeFromLVI(Attributor &A,
6511                           const Instruction *CtxI = nullptr) const {
6512     if (!getAnchorScope())
6513       return getWorstState(getBitWidth());
6514 
6515     LazyValueInfo *LVI =
6516         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6517             *getAnchorScope());
6518 
6519     if (!LVI || !CtxI)
6520       return getWorstState(getBitWidth());
6521     return LVI->getConstantRange(&getAssociatedValue(),
6522                                  const_cast<BasicBlock *>(CtxI->getParent()),
6523                                  const_cast<Instruction *>(CtxI));
6524   }
6525 
6526   /// See AAValueConstantRange::getKnownConstantRange(..).
6527   ConstantRange
6528   getKnownConstantRange(Attributor &A,
6529                         const Instruction *CtxI = nullptr) const override {
6530     if (!CtxI || CtxI == getCtxI())
6531       return getKnown();
6532 
6533     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6534     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6535     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6536   }
6537 
6538   /// See AAValueConstantRange::getAssumedConstantRange(..).
6539   ConstantRange
6540   getAssumedConstantRange(Attributor &A,
6541                           const Instruction *CtxI = nullptr) const override {
6542     // TODO: Make SCEV use Attributor assumption.
6543     //       We may be able to bound a variable range via assumptions in
6544     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6545     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6546 
6547     if (!CtxI || CtxI == getCtxI())
6548       return getAssumed();
6549 
6550     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6551     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6552     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6553   }
6554 
6555   /// See AbstractAttribute::initialize(..).
6556   void initialize(Attributor &A) override {
6557     // Intersect a range given by SCEV.
6558     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6559 
6560     // Intersect a range given by LVI.
6561     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6562   }
6563 
6564   /// Helper function to create MDNode for range metadata.
6565   static MDNode *
6566   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6567                             const ConstantRange &AssumedConstantRange) {
6568     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6569                                   Ty, AssumedConstantRange.getLower())),
6570                               ConstantAsMetadata::get(ConstantInt::get(
6571                                   Ty, AssumedConstantRange.getUpper()))};
6572     return MDNode::get(Ctx, LowAndHigh);
6573   }
6574 
6575   /// Return true if \p Assumed is included in \p KnownRanges.
6576   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6577 
6578     if (Assumed.isFullSet())
6579       return false;
6580 
6581     if (!KnownRanges)
6582       return true;
6583 
6584     // If multiple ranges are annotated in IR, we give up to annotate assumed
6585     // range for now.
6586 
6587     // TODO:  If there exists a known range which containts assumed range, we
6588     // can say assumed range is better.
6589     if (KnownRanges->getNumOperands() > 2)
6590       return false;
6591 
6592     ConstantInt *Lower =
6593         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6594     ConstantInt *Upper =
6595         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6596 
6597     ConstantRange Known(Lower->getValue(), Upper->getValue());
6598     return Known.contains(Assumed) && Known != Assumed;
6599   }
6600 
6601   /// Helper function to set range metadata.
6602   static bool
6603   setRangeMetadataIfisBetterRange(Instruction *I,
6604                                   const ConstantRange &AssumedConstantRange) {
6605     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6606     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6607       if (!AssumedConstantRange.isEmptySet()) {
6608         I->setMetadata(LLVMContext::MD_range,
6609                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6610                                                  AssumedConstantRange));
6611         return true;
6612       }
6613     }
6614     return false;
6615   }
6616 
6617   /// See AbstractAttribute::manifest()
6618   ChangeStatus manifest(Attributor &A) override {
6619     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6620     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6621     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6622 
6623     auto &V = getAssociatedValue();
6624     if (!AssumedConstantRange.isEmptySet() &&
6625         !AssumedConstantRange.isSingleElement()) {
6626       if (Instruction *I = dyn_cast<Instruction>(&V))
6627         if (isa<CallInst>(I) || isa<LoadInst>(I))
6628           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6629             Changed = ChangeStatus::CHANGED;
6630     }
6631 
6632     return Changed;
6633   }
6634 };
6635 
6636 struct AAValueConstantRangeArgument final
6637     : AAArgumentFromCallSiteArguments<
6638           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6639   using Base = AAArgumentFromCallSiteArguments<
6640       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
6641   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
6642       : Base(IRP, A) {}
6643 
6644   /// See AbstractAttribute::initialize(..).
6645   void initialize(Attributor &A) override {
6646     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
6647       indicatePessimisticFixpoint();
6648     } else {
6649       Base::initialize(A);
6650     }
6651   }
6652 
6653   /// See AbstractAttribute::trackStatistics()
6654   void trackStatistics() const override {
6655     STATS_DECLTRACK_ARG_ATTR(value_range)
6656   }
6657 };
6658 
6659 struct AAValueConstantRangeReturned
6660     : AAReturnedFromReturnedValues<AAValueConstantRange,
6661                                    AAValueConstantRangeImpl> {
6662   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6663                                             AAValueConstantRangeImpl>;
6664   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
6665       : Base(IRP, A) {}
6666 
6667   /// See AbstractAttribute::initialize(...).
6668   void initialize(Attributor &A) override {}
6669 
6670   /// See AbstractAttribute::trackStatistics()
6671   void trackStatistics() const override {
6672     STATS_DECLTRACK_FNRET_ATTR(value_range)
6673   }
6674 };
6675 
6676 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6677   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
6678       : AAValueConstantRangeImpl(IRP, A) {}
6679 
6680   /// See AbstractAttribute::initialize(...).
6681   void initialize(Attributor &A) override {
6682     AAValueConstantRangeImpl::initialize(A);
6683     Value &V = getAssociatedValue();
6684 
6685     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6686       unionAssumed(ConstantRange(C->getValue()));
6687       indicateOptimisticFixpoint();
6688       return;
6689     }
6690 
6691     if (isa<UndefValue>(&V)) {
6692       // Collapse the undef state to 0.
6693       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6694       indicateOptimisticFixpoint();
6695       return;
6696     }
6697 
6698     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6699       return;
6700     // If it is a load instruction with range metadata, use it.
6701     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6702       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6703         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6704         return;
6705       }
6706 
6707     // We can work with PHI and select instruction as we traverse their operands
6708     // during update.
6709     if (isa<SelectInst>(V) || isa<PHINode>(V))
6710       return;
6711 
6712     // Otherwise we give up.
6713     indicatePessimisticFixpoint();
6714 
6715     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6716                       << getAssociatedValue() << "\n");
6717   }
6718 
6719   bool calculateBinaryOperator(
6720       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6721       const Instruction *CtxI,
6722       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6723     Value *LHS = BinOp->getOperand(0);
6724     Value *RHS = BinOp->getOperand(1);
6725     // TODO: Allow non integers as well.
6726     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6727       return false;
6728 
6729     auto &LHSAA =
6730         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6731     QuerriedAAs.push_back(&LHSAA);
6732     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6733 
6734     auto &RHSAA =
6735         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6736     QuerriedAAs.push_back(&RHSAA);
6737     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6738 
6739     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
6740 
6741     T.unionAssumed(AssumedRange);
6742 
6743     // TODO: Track a known state too.
6744 
6745     return T.isValidState();
6746   }
6747 
6748   bool calculateCastInst(
6749       Attributor &A, CastInst *CastI, IntegerRangeState &T,
6750       const Instruction *CtxI,
6751       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6752     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
6753     // TODO: Allow non integers as well.
6754     Value &OpV = *CastI->getOperand(0);
6755     if (!OpV.getType()->isIntegerTy())
6756       return false;
6757 
6758     auto &OpAA =
6759         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
6760     QuerriedAAs.push_back(&OpAA);
6761     T.unionAssumed(
6762         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
6763     return T.isValidState();
6764   }
6765 
6766   bool
6767   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
6768                    const Instruction *CtxI,
6769                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6770     Value *LHS = CmpI->getOperand(0);
6771     Value *RHS = CmpI->getOperand(1);
6772     // TODO: Allow non integers as well.
6773     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6774       return false;
6775 
6776     auto &LHSAA =
6777         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6778     QuerriedAAs.push_back(&LHSAA);
6779     auto &RHSAA =
6780         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6781     QuerriedAAs.push_back(&RHSAA);
6782 
6783     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6784     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6785 
6786     // If one of them is empty set, we can't decide.
6787     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
6788       return true;
6789 
6790     bool MustTrue = false, MustFalse = false;
6791 
6792     auto AllowedRegion =
6793         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
6794 
6795     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
6796         CmpI->getPredicate(), RHSAARange);
6797 
6798     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
6799       MustFalse = true;
6800 
6801     if (SatisfyingRegion.contains(LHSAARange))
6802       MustTrue = true;
6803 
6804     assert((!MustTrue || !MustFalse) &&
6805            "Either MustTrue or MustFalse should be false!");
6806 
6807     if (MustTrue)
6808       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
6809     else if (MustFalse)
6810       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
6811     else
6812       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
6813 
6814     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
6815                       << " " << RHSAA << "\n");
6816 
6817     // TODO: Track a known state too.
6818     return T.isValidState();
6819   }
6820 
6821   /// See AbstractAttribute::updateImpl(...).
6822   ChangeStatus updateImpl(Attributor &A) override {
6823     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
6824                             IntegerRangeState &T, bool Stripped) -> bool {
6825       Instruction *I = dyn_cast<Instruction>(&V);
6826       if (!I || isa<CallBase>(I)) {
6827 
6828         // If the value is not instruction, we query AA to Attributor.
6829         const auto &AA =
6830             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
6831 
6832         // Clamp operator is not used to utilize a program point CtxI.
6833         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
6834 
6835         return T.isValidState();
6836       }
6837 
6838       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
6839       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
6840         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
6841           return false;
6842       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
6843         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
6844           return false;
6845       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
6846         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
6847           return false;
6848       } else {
6849         // Give up with other instructions.
6850         // TODO: Add other instructions
6851 
6852         T.indicatePessimisticFixpoint();
6853         return false;
6854       }
6855 
6856       // Catch circular reasoning in a pessimistic way for now.
6857       // TODO: Check how the range evolves and if we stripped anything, see also
6858       //       AADereferenceable or AAAlign for similar situations.
6859       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
6860         if (QueriedAA != this)
6861           continue;
6862         // If we are in a stady state we do not need to worry.
6863         if (T.getAssumed() == getState().getAssumed())
6864           continue;
6865         T.indicatePessimisticFixpoint();
6866       }
6867 
6868       return T.isValidState();
6869     };
6870 
6871     IntegerRangeState T(getBitWidth());
6872 
6873     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
6874             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
6875       return indicatePessimisticFixpoint();
6876 
6877     return clampStateAndIndicateChange(getState(), T);
6878   }
6879 
6880   /// See AbstractAttribute::trackStatistics()
6881   void trackStatistics() const override {
6882     STATS_DECLTRACK_FLOATING_ATTR(value_range)
6883   }
6884 };
6885 
6886 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
6887   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
6888       : AAValueConstantRangeImpl(IRP, A) {}
6889 
6890   /// See AbstractAttribute::initialize(...).
6891   ChangeStatus updateImpl(Attributor &A) override {
6892     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
6893                      "not be called");
6894   }
6895 
6896   /// See AbstractAttribute::trackStatistics()
6897   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
6898 };
6899 
6900 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
6901   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
6902       : AAValueConstantRangeFunction(IRP, A) {}
6903 
6904   /// See AbstractAttribute::trackStatistics()
6905   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
6906 };
6907 
6908 struct AAValueConstantRangeCallSiteReturned
6909     : AACallSiteReturnedFromReturned<AAValueConstantRange,
6910                                      AAValueConstantRangeImpl> {
6911   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
6912       : AACallSiteReturnedFromReturned<AAValueConstantRange,
6913                                        AAValueConstantRangeImpl>(IRP, A) {}
6914 
6915   /// See AbstractAttribute::initialize(...).
6916   void initialize(Attributor &A) override {
6917     // If it is a load instruction with range metadata, use the metadata.
6918     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
6919       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
6920         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6921 
6922     AAValueConstantRangeImpl::initialize(A);
6923   }
6924 
6925   /// See AbstractAttribute::trackStatistics()
6926   void trackStatistics() const override {
6927     STATS_DECLTRACK_CSRET_ATTR(value_range)
6928   }
6929 };
6930 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
6931   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
6932       : AAValueConstantRangeFloating(IRP, A) {}
6933 
6934   /// See AbstractAttribute::trackStatistics()
6935   void trackStatistics() const override {
6936     STATS_DECLTRACK_CSARG_ATTR(value_range)
6937   }
6938 };
6939 } // namespace
6940 
6941 const char AAReturnedValues::ID = 0;
6942 const char AANoUnwind::ID = 0;
6943 const char AANoSync::ID = 0;
6944 const char AANoFree::ID = 0;
6945 const char AANonNull::ID = 0;
6946 const char AANoRecurse::ID = 0;
6947 const char AAWillReturn::ID = 0;
6948 const char AAUndefinedBehavior::ID = 0;
6949 const char AANoAlias::ID = 0;
6950 const char AAReachability::ID = 0;
6951 const char AANoReturn::ID = 0;
6952 const char AAIsDead::ID = 0;
6953 const char AADereferenceable::ID = 0;
6954 const char AAAlign::ID = 0;
6955 const char AANoCapture::ID = 0;
6956 const char AAValueSimplify::ID = 0;
6957 const char AAHeapToStack::ID = 0;
6958 const char AAPrivatizablePtr::ID = 0;
6959 const char AAMemoryBehavior::ID = 0;
6960 const char AAMemoryLocation::ID = 0;
6961 const char AAValueConstantRange::ID = 0;
6962 
6963 // Macro magic to create the static generator function for attributes that
6964 // follow the naming scheme.
6965 
6966 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
6967   case IRPosition::PK:                                                         \
6968     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
6969 
6970 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
6971   case IRPosition::PK:                                                         \
6972     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
6973     ++NumAAs;                                                                  \
6974     break;
6975 
6976 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
6977   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6978     CLASS *AA = nullptr;                                                       \
6979     switch (IRP.getPositionKind()) {                                           \
6980       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6981       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
6982       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
6983       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
6984       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
6985       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
6986       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6987       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
6988     }                                                                          \
6989     return *AA;                                                                \
6990   }
6991 
6992 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
6993   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6994     CLASS *AA = nullptr;                                                       \
6995     switch (IRP.getPositionKind()) {                                           \
6996       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6997       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
6998       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
6999       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7000       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7001       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7002       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7003       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7004     }                                                                          \
7005     return *AA;                                                                \
7006   }
7007 
7008 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
7009   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7010     CLASS *AA = nullptr;                                                       \
7011     switch (IRP.getPositionKind()) {                                           \
7012       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7013       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7014       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7015       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7016       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7017       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7018       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7019       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7020     }                                                                          \
7021     return *AA;                                                                \
7022   }
7023 
7024 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
7025   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7026     CLASS *AA = nullptr;                                                       \
7027     switch (IRP.getPositionKind()) {                                           \
7028       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7029       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7030       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7031       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7032       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7033       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7034       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7035       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7036     }                                                                          \
7037     return *AA;                                                                \
7038   }
7039 
7040 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
7041   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7042     CLASS *AA = nullptr;                                                       \
7043     switch (IRP.getPositionKind()) {                                           \
7044       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7045       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7046       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7047       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7048       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7049       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7050       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7051       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7052     }                                                                          \
7053     return *AA;                                                                \
7054   }
7055 
7056 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
7057 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
7058 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
7059 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
7060 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
7061 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
7062 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
7063 
7064 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
7065 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7066 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
7067 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7068 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7069 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7070 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7071 
7072 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7073 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7074 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7075 
7076 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7077 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7078 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7079 
7080 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7081 
7082 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7083 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7084 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7085 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7086 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7087 #undef SWITCH_PK_CREATE
7088 #undef SWITCH_PK_INV
7089