1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/AssumeBundleQueries.h"
19 #include "llvm/Analysis/CaptureTracking.h"
20 #include "llvm/Analysis/LazyValueInfo.h"
21 #include "llvm/Analysis/MemoryBuiltins.h"
22 #include "llvm/Analysis/ScalarEvolution.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/IR/IRBuilder.h"
25 #include "llvm/IR/IntrinsicInst.h"
26 #include "llvm/IR/NoFolder.h"
27 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
28 #include "llvm/Transforms/Utils/Local.h"
29 
30 #include <cassert>
31 
32 using namespace llvm;
33 
34 #define DEBUG_TYPE "attributor"
35 
36 static cl::opt<bool> ManifestInternal(
37     "attributor-manifest-internal", cl::Hidden,
38     cl::desc("Manifest Attributor internal string attributes."),
39     cl::init(false));
40 
41 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
42                                        cl::Hidden);
43 
44 STATISTIC(NumAAs, "Number of abstract attributes created");
45 
46 // Some helper macros to deal with statistics tracking.
47 //
48 // Usage:
49 // For simple IR attribute tracking overload trackStatistics in the abstract
50 // attribute and choose the right STATS_DECLTRACK_********* macro,
51 // e.g.,:
52 //  void trackStatistics() const override {
53 //    STATS_DECLTRACK_ARG_ATTR(returned)
54 //  }
55 // If there is a single "increment" side one can use the macro
56 // STATS_DECLTRACK with a custom message. If there are multiple increment
57 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
58 //
59 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
60   ("Number of " #TYPE " marked '" #NAME "'")
61 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
62 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
63 #define STATS_DECL(NAME, TYPE, MSG)                                            \
64   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
65 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
66 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
67   {                                                                            \
68     STATS_DECL(NAME, TYPE, MSG)                                                \
69     STATS_TRACK(NAME, TYPE)                                                    \
70   }
71 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
72   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
73 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
74   STATS_DECLTRACK(NAME, CSArguments,                                           \
75                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
76 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
77   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
78 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
79   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
80 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
81   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
82                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
83 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
84   STATS_DECLTRACK(NAME, CSReturn,                                              \
85                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
86 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
87   STATS_DECLTRACK(NAME, Floating,                                              \
88                   ("Number of floating values known to be '" #NAME "'"))
89 
90 // Specialization of the operator<< for abstract attributes subclasses. This
91 // disambiguates situations where multiple operators are applicable.
92 namespace llvm {
93 #define PIPE_OPERATOR(CLASS)                                                   \
94   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
95     return OS << static_cast<const AbstractAttribute &>(AA);                   \
96   }
97 
98 PIPE_OPERATOR(AAIsDead)
99 PIPE_OPERATOR(AANoUnwind)
100 PIPE_OPERATOR(AANoSync)
101 PIPE_OPERATOR(AANoRecurse)
102 PIPE_OPERATOR(AAWillReturn)
103 PIPE_OPERATOR(AANoReturn)
104 PIPE_OPERATOR(AAReturnedValues)
105 PIPE_OPERATOR(AANonNull)
106 PIPE_OPERATOR(AANoAlias)
107 PIPE_OPERATOR(AADereferenceable)
108 PIPE_OPERATOR(AAAlign)
109 PIPE_OPERATOR(AANoCapture)
110 PIPE_OPERATOR(AAValueSimplify)
111 PIPE_OPERATOR(AANoFree)
112 PIPE_OPERATOR(AAHeapToStack)
113 PIPE_OPERATOR(AAReachability)
114 PIPE_OPERATOR(AAMemoryBehavior)
115 PIPE_OPERATOR(AAMemoryLocation)
116 PIPE_OPERATOR(AAValueConstantRange)
117 PIPE_OPERATOR(AAPrivatizablePtr)
118 PIPE_OPERATOR(AAUndefinedBehavior)
119 
120 #undef PIPE_OPERATOR
121 } // namespace llvm
122 
123 namespace {
124 
125 static Optional<ConstantInt *>
126 getAssumedConstantInt(Attributor &A, const Value &V,
127                       const AbstractAttribute &AA,
128                       bool &UsedAssumedInformation) {
129   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
130   if (C.hasValue())
131     return dyn_cast_or_null<ConstantInt>(C.getValue());
132   return llvm::None;
133 }
134 
135 /// Get pointer operand of memory accessing instruction. If \p I is
136 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
137 /// is set to false and the instruction is volatile, return nullptr.
138 static const Value *getPointerOperand(const Instruction *I,
139                                       bool AllowVolatile) {
140   if (auto *LI = dyn_cast<LoadInst>(I)) {
141     if (!AllowVolatile && LI->isVolatile())
142       return nullptr;
143     return LI->getPointerOperand();
144   }
145 
146   if (auto *SI = dyn_cast<StoreInst>(I)) {
147     if (!AllowVolatile && SI->isVolatile())
148       return nullptr;
149     return SI->getPointerOperand();
150   }
151 
152   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
153     if (!AllowVolatile && CXI->isVolatile())
154       return nullptr;
155     return CXI->getPointerOperand();
156   }
157 
158   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
159     if (!AllowVolatile && RMWI->isVolatile())
160       return nullptr;
161     return RMWI->getPointerOperand();
162   }
163 
164   return nullptr;
165 }
166 
167 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
168 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
169 /// getelement pointer instructions that traverse the natural type of \p Ptr if
170 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
171 /// through a cast to i8*.
172 ///
173 /// TODO: This could probably live somewhere more prominantly if it doesn't
174 ///       already exist.
175 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
176                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
177   assert(Offset >= 0 && "Negative offset not supported yet!");
178   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
179                     << "-bytes as " << *ResTy << "\n");
180 
181   // The initial type we are trying to traverse to get nice GEPs.
182   Type *Ty = Ptr->getType();
183 
184   SmallVector<Value *, 4> Indices;
185   std::string GEPName = Ptr->getName().str();
186   while (Offset) {
187     uint64_t Idx, Rem;
188 
189     if (auto *STy = dyn_cast<StructType>(Ty)) {
190       const StructLayout *SL = DL.getStructLayout(STy);
191       if (int64_t(SL->getSizeInBytes()) < Offset)
192         break;
193       Idx = SL->getElementContainingOffset(Offset);
194       assert(Idx < STy->getNumElements() && "Offset calculation error!");
195       Rem = Offset - SL->getElementOffset(Idx);
196       Ty = STy->getElementType(Idx);
197     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
198       Ty = PTy->getElementType();
199       if (!Ty->isSized())
200         break;
201       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
202       assert(ElementSize && "Expected type with size!");
203       Idx = Offset / ElementSize;
204       Rem = Offset % ElementSize;
205     } else {
206       // Non-aggregate type, we cast and make byte-wise progress now.
207       break;
208     }
209 
210     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
211                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
212 
213     GEPName += "." + std::to_string(Idx);
214     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
215     Offset = Rem;
216   }
217 
218   // Create a GEP if we collected indices above.
219   if (Indices.size())
220     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
221 
222   // If an offset is left we use byte-wise adjustment.
223   if (Offset) {
224     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
225     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
226                         GEPName + ".b" + Twine(Offset));
227   }
228 
229   // Ensure the result has the requested type.
230   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
231 
232   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
233   return Ptr;
234 }
235 
236 /// Recursively visit all values that might become \p IRP at some point. This
237 /// will be done by looking through cast instructions, selects, phis, and calls
238 /// with the "returned" attribute. Once we cannot look through the value any
239 /// further, the callback \p VisitValueCB is invoked and passed the current
240 /// value, the \p State, and a flag to indicate if we stripped anything.
241 /// Stripped means that we unpacked the value associated with \p IRP at least
242 /// once. Note that the value used for the callback may still be the value
243 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
244 /// we will never visit more values than specified by \p MaxValues.
245 template <typename AAType, typename StateTy>
246 static bool genericValueTraversal(
247     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
248     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
249         VisitValueCB,
250     const Instruction *CtxI, int MaxValues = 16,
251     function_ref<Value *(Value *)> StripCB = nullptr) {
252 
253   const AAIsDead *LivenessAA = nullptr;
254   if (IRP.getAnchorScope())
255     LivenessAA = &A.getAAFor<AAIsDead>(
256         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
257         /* TrackDependence */ false);
258   bool AnyDead = false;
259 
260   using Item = std::pair<Value *, const Instruction *>;
261   SmallSet<Item, 16> Visited;
262   SmallVector<Item, 16> Worklist;
263   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
264 
265   int Iteration = 0;
266   do {
267     Item I = Worklist.pop_back_val();
268     Value *V = I.first;
269     CtxI = I.second;
270     if (StripCB)
271       V = StripCB(V);
272 
273     // Check if we should process the current value. To prevent endless
274     // recursion keep a record of the values we followed!
275     if (!Visited.insert(I).second)
276       continue;
277 
278     // Make sure we limit the compile time for complex expressions.
279     if (Iteration++ >= MaxValues)
280       return false;
281 
282     // Explicitly look through calls with a "returned" attribute if we do
283     // not have a pointer as stripPointerCasts only works on them.
284     Value *NewV = nullptr;
285     if (V->getType()->isPointerTy()) {
286       NewV = V->stripPointerCasts();
287     } else {
288       auto *CB = dyn_cast<CallBase>(V);
289       if (CB && CB->getCalledFunction()) {
290         for (Argument &Arg : CB->getCalledFunction()->args())
291           if (Arg.hasReturnedAttr()) {
292             NewV = CB->getArgOperand(Arg.getArgNo());
293             break;
294           }
295       }
296     }
297     if (NewV && NewV != V) {
298       Worklist.push_back({NewV, CtxI});
299       continue;
300     }
301 
302     // Look through select instructions, visit both potential values.
303     if (auto *SI = dyn_cast<SelectInst>(V)) {
304       Worklist.push_back({SI->getTrueValue(), CtxI});
305       Worklist.push_back({SI->getFalseValue(), CtxI});
306       continue;
307     }
308 
309     // Look through phi nodes, visit all live operands.
310     if (auto *PHI = dyn_cast<PHINode>(V)) {
311       assert(LivenessAA &&
312              "Expected liveness in the presence of instructions!");
313       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
314         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
315         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
316                             LivenessAA,
317                             /* CheckBBLivenessOnly */ true)) {
318           AnyDead = true;
319           continue;
320         }
321         Worklist.push_back(
322             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
323       }
324       continue;
325     }
326 
327     // Once a leaf is reached we inform the user through the callback.
328     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
329       return false;
330   } while (!Worklist.empty());
331 
332   // If we actually used liveness information so we have to record a dependence.
333   if (AnyDead)
334     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
335 
336   // All values have been visited.
337   return true;
338 }
339 
340 static const Value *
341 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
342                                      const DataLayout &DL,
343                                      bool AllowNonInbounds = false) {
344   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
345   if (!Ptr)
346     return nullptr;
347 
348   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
349                                           AllowNonInbounds);
350 }
351 
352 /// Helper function to clamp a state \p S of type \p StateType with the
353 /// information in \p R and indicate/return if \p S did change (as-in update is
354 /// required to be run again).
355 template <typename StateType>
356 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
357   auto Assumed = S.getAssumed();
358   S ^= R;
359   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
360                                    : ChangeStatus::CHANGED;
361 }
362 
363 /// Clamp the information known for all returned values of a function
364 /// (identified by \p QueryingAA) into \p S.
365 template <typename AAType, typename StateType = typename AAType::StateType>
366 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
367                                      StateType &S) {
368   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
369                     << QueryingAA << " into " << S << "\n");
370 
371   assert((QueryingAA.getIRPosition().getPositionKind() ==
372               IRPosition::IRP_RETURNED ||
373           QueryingAA.getIRPosition().getPositionKind() ==
374               IRPosition::IRP_CALL_SITE_RETURNED) &&
375          "Can only clamp returned value states for a function returned or call "
376          "site returned position!");
377 
378   // Use an optional state as there might not be any return values and we want
379   // to join (IntegerState::operator&) the state of all there are.
380   Optional<StateType> T;
381 
382   // Callback for each possibly returned value.
383   auto CheckReturnValue = [&](Value &RV) -> bool {
384     const IRPosition &RVPos = IRPosition::value(RV);
385     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
386     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
387                       << " @ " << RVPos << "\n");
388     const StateType &AAS = static_cast<const StateType &>(AA.getState());
389     if (T.hasValue())
390       *T &= AAS;
391     else
392       T = AAS;
393     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
394                       << "\n");
395     return T->isValidState();
396   };
397 
398   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
399     S.indicatePessimisticFixpoint();
400   else if (T.hasValue())
401     S ^= *T;
402 }
403 
404 /// Helper class for generic deduction: return value -> returned position.
405 template <typename AAType, typename BaseType,
406           typename StateType = typename BaseType::StateType>
407 struct AAReturnedFromReturnedValues : public BaseType {
408   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
409       : BaseType(IRP, A) {}
410 
411   /// See AbstractAttribute::updateImpl(...).
412   ChangeStatus updateImpl(Attributor &A) override {
413     StateType S(StateType::getBestState(this->getState()));
414     clampReturnedValueStates<AAType, StateType>(A, *this, S);
415     // TODO: If we know we visited all returned values, thus no are assumed
416     // dead, we can take the known information from the state T.
417     return clampStateAndIndicateChange<StateType>(this->getState(), S);
418   }
419 };
420 
421 /// Clamp the information known at all call sites for a given argument
422 /// (identified by \p QueryingAA) into \p S.
423 template <typename AAType, typename StateType = typename AAType::StateType>
424 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
425                                         StateType &S) {
426   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
427                     << QueryingAA << " into " << S << "\n");
428 
429   assert(QueryingAA.getIRPosition().getPositionKind() ==
430              IRPosition::IRP_ARGUMENT &&
431          "Can only clamp call site argument states for an argument position!");
432 
433   // Use an optional state as there might not be any return values and we want
434   // to join (IntegerState::operator&) the state of all there are.
435   Optional<StateType> T;
436 
437   // The argument number which is also the call site argument number.
438   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
439 
440   auto CallSiteCheck = [&](AbstractCallSite ACS) {
441     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
442     // Check if a coresponding argument was found or if it is on not associated
443     // (which can happen for callback calls).
444     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
445       return false;
446 
447     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
448     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
449                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
450     const StateType &AAS = static_cast<const StateType &>(AA.getState());
451     if (T.hasValue())
452       *T &= AAS;
453     else
454       T = AAS;
455     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
456                       << "\n");
457     return T->isValidState();
458   };
459 
460   bool AllCallSitesKnown;
461   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
462                               AllCallSitesKnown))
463     S.indicatePessimisticFixpoint();
464   else if (T.hasValue())
465     S ^= *T;
466 }
467 
468 /// Helper class for generic deduction: call site argument -> argument position.
469 template <typename AAType, typename BaseType,
470           typename StateType = typename AAType::StateType>
471 struct AAArgumentFromCallSiteArguments : public BaseType {
472   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
473       : BaseType(IRP, A) {}
474 
475   /// See AbstractAttribute::updateImpl(...).
476   ChangeStatus updateImpl(Attributor &A) override {
477     StateType S(StateType::getBestState(this->getState()));
478     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
479     // TODO: If we know we visited all incoming values, thus no are assumed
480     // dead, we can take the known information from the state T.
481     return clampStateAndIndicateChange<StateType>(this->getState(), S);
482   }
483 };
484 
485 /// Helper class for generic replication: function returned -> cs returned.
486 template <typename AAType, typename BaseType,
487           typename StateType = typename BaseType::StateType>
488 struct AACallSiteReturnedFromReturned : public BaseType {
489   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
490       : BaseType(IRP, A) {}
491 
492   /// See AbstractAttribute::updateImpl(...).
493   ChangeStatus updateImpl(Attributor &A) override {
494     assert(this->getIRPosition().getPositionKind() ==
495                IRPosition::IRP_CALL_SITE_RETURNED &&
496            "Can only wrap function returned positions for call site returned "
497            "positions!");
498     auto &S = this->getState();
499 
500     const Function *AssociatedFunction =
501         this->getIRPosition().getAssociatedFunction();
502     if (!AssociatedFunction)
503       return S.indicatePessimisticFixpoint();
504 
505     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
506     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
507     return clampStateAndIndicateChange(
508         S, static_cast<const StateType &>(AA.getState()));
509   }
510 };
511 
512 /// Helper function to accumulate uses.
513 template <class AAType, typename StateType = typename AAType::StateType>
514 static void followUsesInContext(AAType &AA, Attributor &A,
515                                 MustBeExecutedContextExplorer &Explorer,
516                                 const Instruction *CtxI,
517                                 SetVector<const Use *> &Uses,
518                                 StateType &State) {
519   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
520   for (unsigned u = 0; u < Uses.size(); ++u) {
521     const Use *U = Uses[u];
522     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
523       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
524       if (Found && AA.followUseInMBEC(A, U, UserI, State))
525         for (const Use &Us : UserI->uses())
526           Uses.insert(&Us);
527     }
528   }
529 }
530 
531 /// Use the must-be-executed-context around \p I to add information into \p S.
532 /// The AAType class is required to have `followUseInMBEC` method with the
533 /// following signature and behaviour:
534 ///
535 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
536 /// U - Underlying use.
537 /// I - The user of the \p U.
538 /// Returns true if the value should be tracked transitively.
539 ///
540 template <class AAType, typename StateType = typename AAType::StateType>
541 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
542                             Instruction &CtxI) {
543 
544   // Container for (transitive) uses of the associated value.
545   SetVector<const Use *> Uses;
546   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
547     Uses.insert(&U);
548 
549   MustBeExecutedContextExplorer &Explorer =
550       A.getInfoCache().getMustBeExecutedContextExplorer();
551 
552   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
553 
554   if (S.isAtFixpoint())
555     return;
556 
557   SmallVector<const BranchInst *, 4> BrInsts;
558   auto Pred = [&](const Instruction *I) {
559     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
560       if (Br->isConditional())
561         BrInsts.push_back(Br);
562     return true;
563   };
564 
565   // Here, accumulate conditional branch instructions in the context. We
566   // explore the child paths and collect the known states. The disjunction of
567   // those states can be merged to its own state. Let ParentState_i be a state
568   // to indicate the known information for an i-th branch instruction in the
569   // context. ChildStates are created for its successors respectively.
570   //
571   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
572   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
573   //      ...
574   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
575   //
576   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
577   //
578   // FIXME: Currently, recursive branches are not handled. For example, we
579   // can't deduce that ptr must be dereferenced in below function.
580   //
581   // void f(int a, int c, int *ptr) {
582   //    if(a)
583   //      if (b) {
584   //        *ptr = 0;
585   //      } else {
586   //        *ptr = 1;
587   //      }
588   //    else {
589   //      if (b) {
590   //        *ptr = 0;
591   //      } else {
592   //        *ptr = 1;
593   //      }
594   //    }
595   // }
596 
597   Explorer.checkForAllContext(&CtxI, Pred);
598   for (const BranchInst *Br : BrInsts) {
599     StateType ParentState;
600 
601     // The known state of the parent state is a conjunction of children's
602     // known states so it is initialized with a best state.
603     ParentState.indicateOptimisticFixpoint();
604 
605     for (const BasicBlock *BB : Br->successors()) {
606       StateType ChildState;
607 
608       size_t BeforeSize = Uses.size();
609       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
610 
611       // Erase uses which only appear in the child.
612       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
613         It = Uses.erase(It);
614 
615       ParentState &= ChildState;
616     }
617 
618     // Use only known state.
619     S += ParentState;
620   }
621 }
622 
623 /// -----------------------NoUnwind Function Attribute--------------------------
624 
625 struct AANoUnwindImpl : AANoUnwind {
626   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
627 
628   const std::string getAsStr() const override {
629     return getAssumed() ? "nounwind" : "may-unwind";
630   }
631 
632   /// See AbstractAttribute::updateImpl(...).
633   ChangeStatus updateImpl(Attributor &A) override {
634     auto Opcodes = {
635         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
636         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
637         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
638 
639     auto CheckForNoUnwind = [&](Instruction &I) {
640       if (!I.mayThrow())
641         return true;
642 
643       if (const auto *CB = dyn_cast<CallBase>(&I)) {
644         const auto &NoUnwindAA =
645             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
646         return NoUnwindAA.isAssumedNoUnwind();
647       }
648       return false;
649     };
650 
651     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
652       return indicatePessimisticFixpoint();
653 
654     return ChangeStatus::UNCHANGED;
655   }
656 };
657 
658 struct AANoUnwindFunction final : public AANoUnwindImpl {
659   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
660       : AANoUnwindImpl(IRP, A) {}
661 
662   /// See AbstractAttribute::trackStatistics()
663   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
664 };
665 
666 /// NoUnwind attribute deduction for a call sites.
667 struct AANoUnwindCallSite final : AANoUnwindImpl {
668   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
669       : AANoUnwindImpl(IRP, A) {}
670 
671   /// See AbstractAttribute::initialize(...).
672   void initialize(Attributor &A) override {
673     AANoUnwindImpl::initialize(A);
674     Function *F = getAssociatedFunction();
675     if (!F)
676       indicatePessimisticFixpoint();
677   }
678 
679   /// See AbstractAttribute::updateImpl(...).
680   ChangeStatus updateImpl(Attributor &A) override {
681     // TODO: Once we have call site specific value information we can provide
682     //       call site specific liveness information and then it makes
683     //       sense to specialize attributes for call sites arguments instead of
684     //       redirecting requests to the callee argument.
685     Function *F = getAssociatedFunction();
686     const IRPosition &FnPos = IRPosition::function(*F);
687     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
688     return clampStateAndIndicateChange(
689         getState(),
690         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
691   }
692 
693   /// See AbstractAttribute::trackStatistics()
694   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
695 };
696 
697 /// --------------------- Function Return Values -------------------------------
698 
699 /// "Attribute" that collects all potential returned values and the return
700 /// instructions that they arise from.
701 ///
702 /// If there is a unique returned value R, the manifest method will:
703 ///   - mark R with the "returned" attribute, if R is an argument.
704 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
705 
706   /// Mapping of values potentially returned by the associated function to the
707   /// return instructions that might return them.
708   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
709 
710   /// Mapping to remember the number of returned values for a call site such
711   /// that we can avoid updates if nothing changed.
712   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
713 
714   /// Set of unresolved calls returned by the associated function.
715   SmallSetVector<CallBase *, 4> UnresolvedCalls;
716 
717   /// State flags
718   ///
719   ///{
720   bool IsFixed = false;
721   bool IsValidState = true;
722   ///}
723 
724 public:
725   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
726       : AAReturnedValues(IRP, A) {}
727 
728   /// See AbstractAttribute::initialize(...).
729   void initialize(Attributor &A) override {
730     // Reset the state.
731     IsFixed = false;
732     IsValidState = true;
733     ReturnedValues.clear();
734 
735     Function *F = getAssociatedFunction();
736     if (!F) {
737       indicatePessimisticFixpoint();
738       return;
739     }
740     assert(!F->getReturnType()->isVoidTy() &&
741            "Did not expect a void return type!");
742 
743     // The map from instruction opcodes to those instructions in the function.
744     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
745 
746     // Look through all arguments, if one is marked as returned we are done.
747     for (Argument &Arg : F->args()) {
748       if (Arg.hasReturnedAttr()) {
749         auto &ReturnInstSet = ReturnedValues[&Arg];
750         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
751           for (Instruction *RI : *Insts)
752             ReturnInstSet.insert(cast<ReturnInst>(RI));
753 
754         indicateOptimisticFixpoint();
755         return;
756       }
757     }
758 
759     if (!A.isFunctionIPOAmendable(*F))
760       indicatePessimisticFixpoint();
761   }
762 
763   /// See AbstractAttribute::manifest(...).
764   ChangeStatus manifest(Attributor &A) override;
765 
766   /// See AbstractAttribute::getState(...).
767   AbstractState &getState() override { return *this; }
768 
769   /// See AbstractAttribute::getState(...).
770   const AbstractState &getState() const override { return *this; }
771 
772   /// See AbstractAttribute::updateImpl(Attributor &A).
773   ChangeStatus updateImpl(Attributor &A) override;
774 
775   llvm::iterator_range<iterator> returned_values() override {
776     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
777   }
778 
779   llvm::iterator_range<const_iterator> returned_values() const override {
780     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
781   }
782 
783   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
784     return UnresolvedCalls;
785   }
786 
787   /// Return the number of potential return values, -1 if unknown.
788   size_t getNumReturnValues() const override {
789     return isValidState() ? ReturnedValues.size() : -1;
790   }
791 
792   /// Return an assumed unique return value if a single candidate is found. If
793   /// there cannot be one, return a nullptr. If it is not clear yet, return the
794   /// Optional::NoneType.
795   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
796 
797   /// See AbstractState::checkForAllReturnedValues(...).
798   bool checkForAllReturnedValuesAndReturnInsts(
799       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
800       const override;
801 
802   /// Pretty print the attribute similar to the IR representation.
803   const std::string getAsStr() const override;
804 
805   /// See AbstractState::isAtFixpoint().
806   bool isAtFixpoint() const override { return IsFixed; }
807 
808   /// See AbstractState::isValidState().
809   bool isValidState() const override { return IsValidState; }
810 
811   /// See AbstractState::indicateOptimisticFixpoint(...).
812   ChangeStatus indicateOptimisticFixpoint() override {
813     IsFixed = true;
814     return ChangeStatus::UNCHANGED;
815   }
816 
817   ChangeStatus indicatePessimisticFixpoint() override {
818     IsFixed = true;
819     IsValidState = false;
820     return ChangeStatus::CHANGED;
821   }
822 };
823 
824 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
825   ChangeStatus Changed = ChangeStatus::UNCHANGED;
826 
827   // Bookkeeping.
828   assert(isValidState());
829   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
830                   "Number of function with known return values");
831 
832   // Check if we have an assumed unique return value that we could manifest.
833   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
834 
835   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
836     return Changed;
837 
838   // Bookkeeping.
839   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
840                   "Number of function with unique return");
841 
842   // Callback to replace the uses of CB with the constant C.
843   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
844     if (CB.use_empty())
845       return ChangeStatus::UNCHANGED;
846     if (A.changeValueAfterManifest(CB, C))
847       return ChangeStatus::CHANGED;
848     return ChangeStatus::UNCHANGED;
849   };
850 
851   // If the assumed unique return value is an argument, annotate it.
852   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
853     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
854             getAssociatedFunction()->getReturnType())) {
855       getIRPosition() = IRPosition::argument(*UniqueRVArg);
856       Changed = IRAttribute::manifest(A);
857     }
858   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
859     // We can replace the returned value with the unique returned constant.
860     Value &AnchorValue = getAnchorValue();
861     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
862       for (const Use &U : F->uses())
863         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
864           if (CB->isCallee(&U)) {
865             Constant *RVCCast =
866                 CB->getType() == RVC->getType()
867                     ? RVC
868                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
869             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
870           }
871     } else {
872       assert(isa<CallBase>(AnchorValue) &&
873              "Expcected a function or call base anchor!");
874       Constant *RVCCast =
875           AnchorValue.getType() == RVC->getType()
876               ? RVC
877               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
878       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
879     }
880     if (Changed == ChangeStatus::CHANGED)
881       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
882                       "Number of function returns replaced by constant return");
883   }
884 
885   return Changed;
886 }
887 
888 const std::string AAReturnedValuesImpl::getAsStr() const {
889   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
890          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
891          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
892 }
893 
894 Optional<Value *>
895 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
896   // If checkForAllReturnedValues provides a unique value, ignoring potential
897   // undef values that can also be present, it is assumed to be the actual
898   // return value and forwarded to the caller of this method. If there are
899   // multiple, a nullptr is returned indicating there cannot be a unique
900   // returned value.
901   Optional<Value *> UniqueRV;
902 
903   auto Pred = [&](Value &RV) -> bool {
904     // If we found a second returned value and neither the current nor the saved
905     // one is an undef, there is no unique returned value. Undefs are special
906     // since we can pretend they have any value.
907     if (UniqueRV.hasValue() && UniqueRV != &RV &&
908         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
909       UniqueRV = nullptr;
910       return false;
911     }
912 
913     // Do not overwrite a value with an undef.
914     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
915       UniqueRV = &RV;
916 
917     return true;
918   };
919 
920   if (!A.checkForAllReturnedValues(Pred, *this))
921     UniqueRV = nullptr;
922 
923   return UniqueRV;
924 }
925 
926 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
927     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
928     const {
929   if (!isValidState())
930     return false;
931 
932   // Check all returned values but ignore call sites as long as we have not
933   // encountered an overdefined one during an update.
934   for (auto &It : ReturnedValues) {
935     Value *RV = It.first;
936 
937     CallBase *CB = dyn_cast<CallBase>(RV);
938     if (CB && !UnresolvedCalls.count(CB))
939       continue;
940 
941     if (!Pred(*RV, It.second))
942       return false;
943   }
944 
945   return true;
946 }
947 
948 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
949   size_t NumUnresolvedCalls = UnresolvedCalls.size();
950   bool Changed = false;
951 
952   // State used in the value traversals starting in returned values.
953   struct RVState {
954     // The map in which we collect return values -> return instrs.
955     decltype(ReturnedValues) &RetValsMap;
956     // The flag to indicate a change.
957     bool &Changed;
958     // The return instrs we come from.
959     SmallSetVector<ReturnInst *, 4> RetInsts;
960   };
961 
962   // Callback for a leaf value returned by the associated function.
963   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
964                          bool) -> bool {
965     auto Size = RVS.RetValsMap[&Val].size();
966     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
967     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
968     RVS.Changed |= Inserted;
969     LLVM_DEBUG({
970       if (Inserted)
971         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
972                << " => " << RVS.RetInsts.size() << "\n";
973     });
974     return true;
975   };
976 
977   // Helper method to invoke the generic value traversal.
978   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
979                                 const Instruction *CtxI) {
980     IRPosition RetValPos = IRPosition::value(RV);
981     return genericValueTraversal<AAReturnedValues, RVState>(
982         A, RetValPos, *this, RVS, VisitValueCB, CtxI);
983   };
984 
985   // Callback for all "return intructions" live in the associated function.
986   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
987     ReturnInst &Ret = cast<ReturnInst>(I);
988     RVState RVS({ReturnedValues, Changed, {}});
989     RVS.RetInsts.insert(&Ret);
990     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
991   };
992 
993   // Start by discovering returned values from all live returned instructions in
994   // the associated function.
995   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
996     return indicatePessimisticFixpoint();
997 
998   // Once returned values "directly" present in the code are handled we try to
999   // resolve returned calls.
1000   decltype(ReturnedValues) NewRVsMap;
1001   for (auto &It : ReturnedValues) {
1002     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1003                       << " by #" << It.second.size() << " RIs\n");
1004     CallBase *CB = dyn_cast<CallBase>(It.first);
1005     if (!CB || UnresolvedCalls.count(CB))
1006       continue;
1007 
1008     if (!CB->getCalledFunction()) {
1009       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1010                         << "\n");
1011       UnresolvedCalls.insert(CB);
1012       continue;
1013     }
1014 
1015     // TODO: use the function scope once we have call site AAReturnedValues.
1016     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1017         *this, IRPosition::function(*CB->getCalledFunction()));
1018     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1019                       << RetValAA << "\n");
1020 
1021     // Skip dead ends, thus if we do not know anything about the returned
1022     // call we mark it as unresolved and it will stay that way.
1023     if (!RetValAA.getState().isValidState()) {
1024       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1025                         << "\n");
1026       UnresolvedCalls.insert(CB);
1027       continue;
1028     }
1029 
1030     // Do not try to learn partial information. If the callee has unresolved
1031     // return values we will treat the call as unresolved/opaque.
1032     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1033     if (!RetValAAUnresolvedCalls.empty()) {
1034       UnresolvedCalls.insert(CB);
1035       continue;
1036     }
1037 
1038     // Now check if we can track transitively returned values. If possible, thus
1039     // if all return value can be represented in the current scope, do so.
1040     bool Unresolved = false;
1041     for (auto &RetValAAIt : RetValAA.returned_values()) {
1042       Value *RetVal = RetValAAIt.first;
1043       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1044           isa<Constant>(RetVal))
1045         continue;
1046       // Anything that did not fit in the above categories cannot be resolved,
1047       // mark the call as unresolved.
1048       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1049                            "cannot be translated: "
1050                         << *RetVal << "\n");
1051       UnresolvedCalls.insert(CB);
1052       Unresolved = true;
1053       break;
1054     }
1055 
1056     if (Unresolved)
1057       continue;
1058 
1059     // Now track transitively returned values.
1060     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1061     if (NumRetAA == RetValAA.getNumReturnValues()) {
1062       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1063                            "changed since it was seen last\n");
1064       continue;
1065     }
1066     NumRetAA = RetValAA.getNumReturnValues();
1067 
1068     for (auto &RetValAAIt : RetValAA.returned_values()) {
1069       Value *RetVal = RetValAAIt.first;
1070       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1071         // Arguments are mapped to call site operands and we begin the traversal
1072         // again.
1073         bool Unused = false;
1074         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1075         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1076         continue;
1077       } else if (isa<CallBase>(RetVal)) {
1078         // Call sites are resolved by the callee attribute over time, no need to
1079         // do anything for us.
1080         continue;
1081       } else if (isa<Constant>(RetVal)) {
1082         // Constants are valid everywhere, we can simply take them.
1083         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1084         continue;
1085       }
1086     }
1087   }
1088 
1089   // To avoid modifications to the ReturnedValues map while we iterate over it
1090   // we kept record of potential new entries in a copy map, NewRVsMap.
1091   for (auto &It : NewRVsMap) {
1092     assert(!It.second.empty() && "Entry does not add anything.");
1093     auto &ReturnInsts = ReturnedValues[It.first];
1094     for (ReturnInst *RI : It.second)
1095       if (ReturnInsts.insert(RI)) {
1096         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1097                           << *It.first << " => " << *RI << "\n");
1098         Changed = true;
1099       }
1100   }
1101 
1102   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1103   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1104 }
1105 
1106 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1107   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1108       : AAReturnedValuesImpl(IRP, A) {}
1109 
1110   /// See AbstractAttribute::trackStatistics()
1111   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1112 };
1113 
1114 /// Returned values information for a call sites.
1115 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1116   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1117       : AAReturnedValuesImpl(IRP, A) {}
1118 
1119   /// See AbstractAttribute::initialize(...).
1120   void initialize(Attributor &A) override {
1121     // TODO: Once we have call site specific value information we can provide
1122     //       call site specific liveness information and then it makes
1123     //       sense to specialize attributes for call sites instead of
1124     //       redirecting requests to the callee.
1125     llvm_unreachable("Abstract attributes for returned values are not "
1126                      "supported for call sites yet!");
1127   }
1128 
1129   /// See AbstractAttribute::updateImpl(...).
1130   ChangeStatus updateImpl(Attributor &A) override {
1131     return indicatePessimisticFixpoint();
1132   }
1133 
1134   /// See AbstractAttribute::trackStatistics()
1135   void trackStatistics() const override {}
1136 };
1137 
1138 /// ------------------------ NoSync Function Attribute -------------------------
1139 
1140 struct AANoSyncImpl : AANoSync {
1141   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1142 
1143   const std::string getAsStr() const override {
1144     return getAssumed() ? "nosync" : "may-sync";
1145   }
1146 
1147   /// See AbstractAttribute::updateImpl(...).
1148   ChangeStatus updateImpl(Attributor &A) override;
1149 
1150   /// Helper function used to determine whether an instruction is non-relaxed
1151   /// atomic. In other words, if an atomic instruction does not have unordered
1152   /// or monotonic ordering
1153   static bool isNonRelaxedAtomic(Instruction *I);
1154 
1155   /// Helper function used to determine whether an instruction is volatile.
1156   static bool isVolatile(Instruction *I);
1157 
1158   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1159   /// memset).
1160   static bool isNoSyncIntrinsic(Instruction *I);
1161 };
1162 
1163 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1164   if (!I->isAtomic())
1165     return false;
1166 
1167   AtomicOrdering Ordering;
1168   switch (I->getOpcode()) {
1169   case Instruction::AtomicRMW:
1170     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1171     break;
1172   case Instruction::Store:
1173     Ordering = cast<StoreInst>(I)->getOrdering();
1174     break;
1175   case Instruction::Load:
1176     Ordering = cast<LoadInst>(I)->getOrdering();
1177     break;
1178   case Instruction::Fence: {
1179     auto *FI = cast<FenceInst>(I);
1180     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1181       return false;
1182     Ordering = FI->getOrdering();
1183     break;
1184   }
1185   case Instruction::AtomicCmpXchg: {
1186     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1187     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1188     // Only if both are relaxed, than it can be treated as relaxed.
1189     // Otherwise it is non-relaxed.
1190     if (Success != AtomicOrdering::Unordered &&
1191         Success != AtomicOrdering::Monotonic)
1192       return true;
1193     if (Failure != AtomicOrdering::Unordered &&
1194         Failure != AtomicOrdering::Monotonic)
1195       return true;
1196     return false;
1197   }
1198   default:
1199     llvm_unreachable(
1200         "New atomic operations need to be known in the attributor.");
1201   }
1202 
1203   // Relaxed.
1204   if (Ordering == AtomicOrdering::Unordered ||
1205       Ordering == AtomicOrdering::Monotonic)
1206     return false;
1207   return true;
1208 }
1209 
1210 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1211 /// FIXME: We should ipmrove the handling of intrinsics.
1212 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1213   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1214     switch (II->getIntrinsicID()) {
1215     /// Element wise atomic memory intrinsics are can only be unordered,
1216     /// therefore nosync.
1217     case Intrinsic::memset_element_unordered_atomic:
1218     case Intrinsic::memmove_element_unordered_atomic:
1219     case Intrinsic::memcpy_element_unordered_atomic:
1220       return true;
1221     case Intrinsic::memset:
1222     case Intrinsic::memmove:
1223     case Intrinsic::memcpy:
1224       if (!cast<MemIntrinsic>(II)->isVolatile())
1225         return true;
1226       return false;
1227     default:
1228       return false;
1229     }
1230   }
1231   return false;
1232 }
1233 
1234 bool AANoSyncImpl::isVolatile(Instruction *I) {
1235   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1236 
1237   switch (I->getOpcode()) {
1238   case Instruction::AtomicRMW:
1239     return cast<AtomicRMWInst>(I)->isVolatile();
1240   case Instruction::Store:
1241     return cast<StoreInst>(I)->isVolatile();
1242   case Instruction::Load:
1243     return cast<LoadInst>(I)->isVolatile();
1244   case Instruction::AtomicCmpXchg:
1245     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1246   default:
1247     return false;
1248   }
1249 }
1250 
1251 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1252 
1253   auto CheckRWInstForNoSync = [&](Instruction &I) {
1254     /// We are looking for volatile instructions or Non-Relaxed atomics.
1255     /// FIXME: We should improve the handling of intrinsics.
1256 
1257     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1258       return true;
1259 
1260     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1261       if (CB->hasFnAttr(Attribute::NoSync))
1262         return true;
1263 
1264       const auto &NoSyncAA =
1265           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
1266       if (NoSyncAA.isAssumedNoSync())
1267         return true;
1268       return false;
1269     }
1270 
1271     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1272       return true;
1273 
1274     return false;
1275   };
1276 
1277   auto CheckForNoSync = [&](Instruction &I) {
1278     // At this point we handled all read/write effects and they are all
1279     // nosync, so they can be skipped.
1280     if (I.mayReadOrWriteMemory())
1281       return true;
1282 
1283     // non-convergent and readnone imply nosync.
1284     return !cast<CallBase>(I).isConvergent();
1285   };
1286 
1287   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1288       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1289     return indicatePessimisticFixpoint();
1290 
1291   return ChangeStatus::UNCHANGED;
1292 }
1293 
1294 struct AANoSyncFunction final : public AANoSyncImpl {
1295   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1296       : AANoSyncImpl(IRP, A) {}
1297 
1298   /// See AbstractAttribute::trackStatistics()
1299   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1300 };
1301 
1302 /// NoSync attribute deduction for a call sites.
1303 struct AANoSyncCallSite final : AANoSyncImpl {
1304   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1305       : AANoSyncImpl(IRP, A) {}
1306 
1307   /// See AbstractAttribute::initialize(...).
1308   void initialize(Attributor &A) override {
1309     AANoSyncImpl::initialize(A);
1310     Function *F = getAssociatedFunction();
1311     if (!F)
1312       indicatePessimisticFixpoint();
1313   }
1314 
1315   /// See AbstractAttribute::updateImpl(...).
1316   ChangeStatus updateImpl(Attributor &A) override {
1317     // TODO: Once we have call site specific value information we can provide
1318     //       call site specific liveness information and then it makes
1319     //       sense to specialize attributes for call sites arguments instead of
1320     //       redirecting requests to the callee argument.
1321     Function *F = getAssociatedFunction();
1322     const IRPosition &FnPos = IRPosition::function(*F);
1323     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1324     return clampStateAndIndicateChange(
1325         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1326   }
1327 
1328   /// See AbstractAttribute::trackStatistics()
1329   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1330 };
1331 
1332 /// ------------------------ No-Free Attributes ----------------------------
1333 
1334 struct AANoFreeImpl : public AANoFree {
1335   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1336 
1337   /// See AbstractAttribute::updateImpl(...).
1338   ChangeStatus updateImpl(Attributor &A) override {
1339     auto CheckForNoFree = [&](Instruction &I) {
1340       const auto &CB = cast<CallBase>(I);
1341       if (CB.hasFnAttr(Attribute::NoFree))
1342         return true;
1343 
1344       const auto &NoFreeAA =
1345           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1346       return NoFreeAA.isAssumedNoFree();
1347     };
1348 
1349     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1350       return indicatePessimisticFixpoint();
1351     return ChangeStatus::UNCHANGED;
1352   }
1353 
1354   /// See AbstractAttribute::getAsStr().
1355   const std::string getAsStr() const override {
1356     return getAssumed() ? "nofree" : "may-free";
1357   }
1358 };
1359 
1360 struct AANoFreeFunction final : public AANoFreeImpl {
1361   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1362       : AANoFreeImpl(IRP, A) {}
1363 
1364   /// See AbstractAttribute::trackStatistics()
1365   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1366 };
1367 
1368 /// NoFree attribute deduction for a call sites.
1369 struct AANoFreeCallSite final : AANoFreeImpl {
1370   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1371       : AANoFreeImpl(IRP, A) {}
1372 
1373   /// See AbstractAttribute::initialize(...).
1374   void initialize(Attributor &A) override {
1375     AANoFreeImpl::initialize(A);
1376     Function *F = getAssociatedFunction();
1377     if (!F)
1378       indicatePessimisticFixpoint();
1379   }
1380 
1381   /// See AbstractAttribute::updateImpl(...).
1382   ChangeStatus updateImpl(Attributor &A) override {
1383     // TODO: Once we have call site specific value information we can provide
1384     //       call site specific liveness information and then it makes
1385     //       sense to specialize attributes for call sites arguments instead of
1386     //       redirecting requests to the callee argument.
1387     Function *F = getAssociatedFunction();
1388     const IRPosition &FnPos = IRPosition::function(*F);
1389     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1390     return clampStateAndIndicateChange(
1391         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1392   }
1393 
1394   /// See AbstractAttribute::trackStatistics()
1395   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1396 };
1397 
1398 /// NoFree attribute for floating values.
1399 struct AANoFreeFloating : AANoFreeImpl {
1400   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1401       : AANoFreeImpl(IRP, A) {}
1402 
1403   /// See AbstractAttribute::trackStatistics()
1404   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1405 
1406   /// See Abstract Attribute::updateImpl(...).
1407   ChangeStatus updateImpl(Attributor &A) override {
1408     const IRPosition &IRP = getIRPosition();
1409 
1410     const auto &NoFreeAA =
1411         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1412     if (NoFreeAA.isAssumedNoFree())
1413       return ChangeStatus::UNCHANGED;
1414 
1415     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1416     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1417       Instruction *UserI = cast<Instruction>(U.getUser());
1418       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1419         if (CB->isBundleOperand(&U))
1420           return false;
1421         if (!CB->isArgOperand(&U))
1422           return true;
1423         unsigned ArgNo = CB->getArgOperandNo(&U);
1424 
1425         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1426             *this, IRPosition::callsite_argument(*CB, ArgNo));
1427         return NoFreeArg.isAssumedNoFree();
1428       }
1429 
1430       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1431           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1432         Follow = true;
1433         return true;
1434       }
1435       if (isa<ReturnInst>(UserI))
1436         return true;
1437 
1438       // Unknown user.
1439       return false;
1440     };
1441     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1442       return indicatePessimisticFixpoint();
1443 
1444     return ChangeStatus::UNCHANGED;
1445   }
1446 };
1447 
1448 /// NoFree attribute for a call site argument.
1449 struct AANoFreeArgument final : AANoFreeFloating {
1450   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1451       : AANoFreeFloating(IRP, A) {}
1452 
1453   /// See AbstractAttribute::trackStatistics()
1454   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1455 };
1456 
1457 /// NoFree attribute for call site arguments.
1458 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1459   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1460       : AANoFreeFloating(IRP, A) {}
1461 
1462   /// See AbstractAttribute::updateImpl(...).
1463   ChangeStatus updateImpl(Attributor &A) override {
1464     // TODO: Once we have call site specific value information we can provide
1465     //       call site specific liveness information and then it makes
1466     //       sense to specialize attributes for call sites arguments instead of
1467     //       redirecting requests to the callee argument.
1468     Argument *Arg = getAssociatedArgument();
1469     if (!Arg)
1470       return indicatePessimisticFixpoint();
1471     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1472     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1473     return clampStateAndIndicateChange(
1474         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1475   }
1476 
1477   /// See AbstractAttribute::trackStatistics()
1478   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1479 };
1480 
1481 /// NoFree attribute for function return value.
1482 struct AANoFreeReturned final : AANoFreeFloating {
1483   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1484       : AANoFreeFloating(IRP, A) {
1485     llvm_unreachable("NoFree is not applicable to function returns!");
1486   }
1487 
1488   /// See AbstractAttribute::initialize(...).
1489   void initialize(Attributor &A) override {
1490     llvm_unreachable("NoFree is not applicable to function returns!");
1491   }
1492 
1493   /// See AbstractAttribute::updateImpl(...).
1494   ChangeStatus updateImpl(Attributor &A) override {
1495     llvm_unreachable("NoFree is not applicable to function returns!");
1496   }
1497 
1498   /// See AbstractAttribute::trackStatistics()
1499   void trackStatistics() const override {}
1500 };
1501 
1502 /// NoFree attribute deduction for a call site return value.
1503 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1504   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1505       : AANoFreeFloating(IRP, A) {}
1506 
1507   ChangeStatus manifest(Attributor &A) override {
1508     return ChangeStatus::UNCHANGED;
1509   }
1510   /// See AbstractAttribute::trackStatistics()
1511   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1512 };
1513 
1514 /// ------------------------ NonNull Argument Attribute ------------------------
1515 static int64_t getKnownNonNullAndDerefBytesForUse(
1516     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1517     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1518   TrackUse = false;
1519 
1520   const Value *UseV = U->get();
1521   if (!UseV->getType()->isPointerTy())
1522     return 0;
1523 
1524   Type *PtrTy = UseV->getType();
1525   const Function *F = I->getFunction();
1526   bool NullPointerIsDefined =
1527       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1528   const DataLayout &DL = A.getInfoCache().getDL();
1529   if (const auto *CB = dyn_cast<CallBase>(I)) {
1530     if (CB->isBundleOperand(U)) {
1531       if (RetainedKnowledge RK = getKnowledgeFromUse(
1532               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1533         IsNonNull |=
1534             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1535         return RK.ArgValue;
1536       }
1537       return 0;
1538     }
1539 
1540     if (CB->isCallee(U)) {
1541       IsNonNull |= !NullPointerIsDefined;
1542       return 0;
1543     }
1544 
1545     unsigned ArgNo = CB->getArgOperandNo(U);
1546     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1547     // As long as we only use known information there is no need to track
1548     // dependences here.
1549     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1550                                                   /* TrackDependence */ false);
1551     IsNonNull |= DerefAA.isKnownNonNull();
1552     return DerefAA.getKnownDereferenceableBytes();
1553   }
1554 
1555   // We need to follow common pointer manipulation uses to the accesses they
1556   // feed into. We can try to be smart to avoid looking through things we do not
1557   // like for now, e.g., non-inbounds GEPs.
1558   if (isa<CastInst>(I)) {
1559     TrackUse = true;
1560     return 0;
1561   }
1562   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1563     if (GEP->hasAllConstantIndices()) {
1564       TrackUse = true;
1565       return 0;
1566     }
1567 
1568   int64_t Offset;
1569   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1570     if (Base == &AssociatedValue &&
1571         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1572       int64_t DerefBytes =
1573           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1574 
1575       IsNonNull |= !NullPointerIsDefined;
1576       return std::max(int64_t(0), DerefBytes);
1577     }
1578   }
1579 
1580   /// Corner case when an offset is 0.
1581   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1582           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1583     if (Offset == 0 && Base == &AssociatedValue &&
1584         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1585       int64_t DerefBytes =
1586           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1587       IsNonNull |= !NullPointerIsDefined;
1588       return std::max(int64_t(0), DerefBytes);
1589     }
1590   }
1591 
1592   return 0;
1593 }
1594 
1595 struct AANonNullImpl : AANonNull {
1596   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1597       : AANonNull(IRP, A),
1598         NullIsDefined(NullPointerIsDefined(
1599             getAnchorScope(),
1600             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1601 
1602   /// See AbstractAttribute::initialize(...).
1603   void initialize(Attributor &A) override {
1604     if (!NullIsDefined &&
1605         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1606                 /* IgnoreSubsumingPositions */ false, &A))
1607       indicateOptimisticFixpoint();
1608     else if (isa<ConstantPointerNull>(getAssociatedValue()))
1609       indicatePessimisticFixpoint();
1610     else
1611       AANonNull::initialize(A);
1612 
1613     if (!getState().isAtFixpoint())
1614       if (Instruction *CtxI = getCtxI())
1615         followUsesInMBEC(*this, A, getState(), *CtxI);
1616   }
1617 
1618   /// See followUsesInMBEC
1619   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1620                        AANonNull::StateType &State) {
1621     bool IsNonNull = false;
1622     bool TrackUse = false;
1623     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1624                                        IsNonNull, TrackUse);
1625     State.setKnown(IsNonNull);
1626     return TrackUse;
1627   }
1628 
1629   /// See AbstractAttribute::getAsStr().
1630   const std::string getAsStr() const override {
1631     return getAssumed() ? "nonnull" : "may-null";
1632   }
1633 
1634   /// Flag to determine if the underlying value can be null and still allow
1635   /// valid accesses.
1636   const bool NullIsDefined;
1637 };
1638 
1639 /// NonNull attribute for a floating value.
1640 struct AANonNullFloating : public AANonNullImpl {
1641   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1642       : AANonNullImpl(IRP, A) {}
1643 
1644   /// See AbstractAttribute::updateImpl(...).
1645   ChangeStatus updateImpl(Attributor &A) override {
1646     if (!NullIsDefined) {
1647       const auto &DerefAA =
1648           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1649       if (DerefAA.getAssumedDereferenceableBytes())
1650         return ChangeStatus::UNCHANGED;
1651     }
1652 
1653     const DataLayout &DL = A.getDataLayout();
1654 
1655     DominatorTree *DT = nullptr;
1656     AssumptionCache *AC = nullptr;
1657     InformationCache &InfoCache = A.getInfoCache();
1658     if (const Function *Fn = getAnchorScope()) {
1659       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1660       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1661     }
1662 
1663     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1664                             AANonNull::StateType &T, bool Stripped) -> bool {
1665       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1666       if (!Stripped && this == &AA) {
1667         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1668           T.indicatePessimisticFixpoint();
1669       } else {
1670         // Use abstract attribute information.
1671         const AANonNull::StateType &NS =
1672             static_cast<const AANonNull::StateType &>(AA.getState());
1673         T ^= NS;
1674       }
1675       return T.isValidState();
1676     };
1677 
1678     StateType T;
1679     if (!genericValueTraversal<AANonNull, StateType>(
1680             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1681       return indicatePessimisticFixpoint();
1682 
1683     return clampStateAndIndicateChange(getState(), T);
1684   }
1685 
1686   /// See AbstractAttribute::trackStatistics()
1687   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1688 };
1689 
1690 /// NonNull attribute for function return value.
1691 struct AANonNullReturned final
1692     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1693   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1694       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {}
1695 
1696   /// See AbstractAttribute::trackStatistics()
1697   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1698 };
1699 
1700 /// NonNull attribute for function argument.
1701 struct AANonNullArgument final
1702     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1703   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1704       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1705 
1706   /// See AbstractAttribute::trackStatistics()
1707   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1708 };
1709 
1710 struct AANonNullCallSiteArgument final : AANonNullFloating {
1711   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1712       : AANonNullFloating(IRP, A) {}
1713 
1714   /// See AbstractAttribute::trackStatistics()
1715   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1716 };
1717 
1718 /// NonNull attribute for a call site return position.
1719 struct AANonNullCallSiteReturned final
1720     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1721   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1722       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1723 
1724   /// See AbstractAttribute::trackStatistics()
1725   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1726 };
1727 
1728 /// ------------------------ No-Recurse Attributes ----------------------------
1729 
1730 struct AANoRecurseImpl : public AANoRecurse {
1731   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1732 
1733   /// See AbstractAttribute::getAsStr()
1734   const std::string getAsStr() const override {
1735     return getAssumed() ? "norecurse" : "may-recurse";
1736   }
1737 };
1738 
1739 struct AANoRecurseFunction final : AANoRecurseImpl {
1740   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1741       : AANoRecurseImpl(IRP, A) {}
1742 
1743   /// See AbstractAttribute::initialize(...).
1744   void initialize(Attributor &A) override {
1745     AANoRecurseImpl::initialize(A);
1746     if (const Function *F = getAnchorScope())
1747       if (A.getInfoCache().getSccSize(*F) != 1)
1748         indicatePessimisticFixpoint();
1749   }
1750 
1751   /// See AbstractAttribute::updateImpl(...).
1752   ChangeStatus updateImpl(Attributor &A) override {
1753 
1754     // If all live call sites are known to be no-recurse, we are as well.
1755     auto CallSitePred = [&](AbstractCallSite ACS) {
1756       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1757           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1758           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1759       return NoRecurseAA.isKnownNoRecurse();
1760     };
1761     bool AllCallSitesKnown;
1762     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1763       // If we know all call sites and all are known no-recurse, we are done.
1764       // If all known call sites, which might not be all that exist, are known
1765       // to be no-recurse, we are not done but we can continue to assume
1766       // no-recurse. If one of the call sites we have not visited will become
1767       // live, another update is triggered.
1768       if (AllCallSitesKnown)
1769         indicateOptimisticFixpoint();
1770       return ChangeStatus::UNCHANGED;
1771     }
1772 
1773     // If the above check does not hold anymore we look at the calls.
1774     auto CheckForNoRecurse = [&](Instruction &I) {
1775       const auto &CB = cast<CallBase>(I);
1776       if (CB.hasFnAttr(Attribute::NoRecurse))
1777         return true;
1778 
1779       const auto &NoRecurseAA =
1780           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1781       if (!NoRecurseAA.isAssumedNoRecurse())
1782         return false;
1783 
1784       // Recursion to the same function
1785       if (CB.getCalledFunction() == getAnchorScope())
1786         return false;
1787 
1788       return true;
1789     };
1790 
1791     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1792       return indicatePessimisticFixpoint();
1793     return ChangeStatus::UNCHANGED;
1794   }
1795 
1796   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1797 };
1798 
1799 /// NoRecurse attribute deduction for a call sites.
1800 struct AANoRecurseCallSite final : AANoRecurseImpl {
1801   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1802       : AANoRecurseImpl(IRP, A) {}
1803 
1804   /// See AbstractAttribute::initialize(...).
1805   void initialize(Attributor &A) override {
1806     AANoRecurseImpl::initialize(A);
1807     Function *F = getAssociatedFunction();
1808     if (!F)
1809       indicatePessimisticFixpoint();
1810   }
1811 
1812   /// See AbstractAttribute::updateImpl(...).
1813   ChangeStatus updateImpl(Attributor &A) override {
1814     // TODO: Once we have call site specific value information we can provide
1815     //       call site specific liveness information and then it makes
1816     //       sense to specialize attributes for call sites arguments instead of
1817     //       redirecting requests to the callee argument.
1818     Function *F = getAssociatedFunction();
1819     const IRPosition &FnPos = IRPosition::function(*F);
1820     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1821     return clampStateAndIndicateChange(
1822         getState(),
1823         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1824   }
1825 
1826   /// See AbstractAttribute::trackStatistics()
1827   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1828 };
1829 
1830 /// -------------------- Undefined-Behavior Attributes ------------------------
1831 
1832 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1833   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1834       : AAUndefinedBehavior(IRP, A) {}
1835 
1836   /// See AbstractAttribute::updateImpl(...).
1837   // through a pointer (i.e. also branches etc.)
1838   ChangeStatus updateImpl(Attributor &A) override {
1839     const size_t UBPrevSize = KnownUBInsts.size();
1840     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1841 
1842     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1843       // Skip instructions that are already saved.
1844       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1845         return true;
1846 
1847       // If we reach here, we know we have an instruction
1848       // that accesses memory through a pointer operand,
1849       // for which getPointerOperand() should give it to us.
1850       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1851       assert(PtrOp &&
1852              "Expected pointer operand of memory accessing instruction");
1853 
1854       // Either we stopped and the appropriate action was taken,
1855       // or we got back a simplified value to continue.
1856       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1857       if (!SimplifiedPtrOp.hasValue())
1858         return true;
1859       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1860 
1861       // A memory access through a pointer is considered UB
1862       // only if the pointer has constant null value.
1863       // TODO: Expand it to not only check constant values.
1864       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1865         AssumedNoUBInsts.insert(&I);
1866         return true;
1867       }
1868       const Type *PtrTy = PtrOpVal->getType();
1869 
1870       // Because we only consider instructions inside functions,
1871       // assume that a parent function exists.
1872       const Function *F = I.getFunction();
1873 
1874       // A memory access using constant null pointer is only considered UB
1875       // if null pointer is _not_ defined for the target platform.
1876       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1877         AssumedNoUBInsts.insert(&I);
1878       else
1879         KnownUBInsts.insert(&I);
1880       return true;
1881     };
1882 
1883     auto InspectBrInstForUB = [&](Instruction &I) {
1884       // A conditional branch instruction is considered UB if it has `undef`
1885       // condition.
1886 
1887       // Skip instructions that are already saved.
1888       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1889         return true;
1890 
1891       // We know we have a branch instruction.
1892       auto BrInst = cast<BranchInst>(&I);
1893 
1894       // Unconditional branches are never considered UB.
1895       if (BrInst->isUnconditional())
1896         return true;
1897 
1898       // Either we stopped and the appropriate action was taken,
1899       // or we got back a simplified value to continue.
1900       Optional<Value *> SimplifiedCond =
1901           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1902       if (!SimplifiedCond.hasValue())
1903         return true;
1904       AssumedNoUBInsts.insert(&I);
1905       return true;
1906     };
1907 
1908     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
1909                               {Instruction::Load, Instruction::Store,
1910                                Instruction::AtomicCmpXchg,
1911                                Instruction::AtomicRMW},
1912                               /* CheckBBLivenessOnly */ true);
1913     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
1914                               /* CheckBBLivenessOnly */ true);
1915     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
1916         UBPrevSize != KnownUBInsts.size())
1917       return ChangeStatus::CHANGED;
1918     return ChangeStatus::UNCHANGED;
1919   }
1920 
1921   bool isKnownToCauseUB(Instruction *I) const override {
1922     return KnownUBInsts.count(I);
1923   }
1924 
1925   bool isAssumedToCauseUB(Instruction *I) const override {
1926     // In simple words, if an instruction is not in the assumed to _not_
1927     // cause UB, then it is assumed UB (that includes those
1928     // in the KnownUBInsts set). The rest is boilerplate
1929     // is to ensure that it is one of the instructions we test
1930     // for UB.
1931 
1932     switch (I->getOpcode()) {
1933     case Instruction::Load:
1934     case Instruction::Store:
1935     case Instruction::AtomicCmpXchg:
1936     case Instruction::AtomicRMW:
1937       return !AssumedNoUBInsts.count(I);
1938     case Instruction::Br: {
1939       auto BrInst = cast<BranchInst>(I);
1940       if (BrInst->isUnconditional())
1941         return false;
1942       return !AssumedNoUBInsts.count(I);
1943     } break;
1944     default:
1945       return false;
1946     }
1947     return false;
1948   }
1949 
1950   ChangeStatus manifest(Attributor &A) override {
1951     if (KnownUBInsts.empty())
1952       return ChangeStatus::UNCHANGED;
1953     for (Instruction *I : KnownUBInsts)
1954       A.changeToUnreachableAfterManifest(I);
1955     return ChangeStatus::CHANGED;
1956   }
1957 
1958   /// See AbstractAttribute::getAsStr()
1959   const std::string getAsStr() const override {
1960     return getAssumed() ? "undefined-behavior" : "no-ub";
1961   }
1962 
1963   /// Note: The correctness of this analysis depends on the fact that the
1964   /// following 2 sets will stop changing after some point.
1965   /// "Change" here means that their size changes.
1966   /// The size of each set is monotonically increasing
1967   /// (we only add items to them) and it is upper bounded by the number of
1968   /// instructions in the processed function (we can never save more
1969   /// elements in either set than this number). Hence, at some point,
1970   /// they will stop increasing.
1971   /// Consequently, at some point, both sets will have stopped
1972   /// changing, effectively making the analysis reach a fixpoint.
1973 
1974   /// Note: These 2 sets are disjoint and an instruction can be considered
1975   /// one of 3 things:
1976   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
1977   ///    the KnownUBInsts set.
1978   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
1979   ///    has a reason to assume it).
1980   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
1981   ///    could not find a reason to assume or prove that it can cause UB,
1982   ///    hence it assumes it doesn't. We have a set for these instructions
1983   ///    so that we don't reprocess them in every update.
1984   ///    Note however that instructions in this set may cause UB.
1985 
1986 protected:
1987   /// A set of all live instructions _known_ to cause UB.
1988   SmallPtrSet<Instruction *, 8> KnownUBInsts;
1989 
1990 private:
1991   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
1992   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
1993 
1994   // Should be called on updates in which if we're processing an instruction
1995   // \p I that depends on a value \p V, one of the following has to happen:
1996   // - If the value is assumed, then stop.
1997   // - If the value is known but undef, then consider it UB.
1998   // - Otherwise, do specific processing with the simplified value.
1999   // We return None in the first 2 cases to signify that an appropriate
2000   // action was taken and the caller should stop.
2001   // Otherwise, we return the simplified value that the caller should
2002   // use for specific processing.
2003   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2004                                          Instruction *I) {
2005     const auto &ValueSimplifyAA =
2006         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2007     Optional<Value *> SimplifiedV =
2008         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2009     if (!ValueSimplifyAA.isKnown()) {
2010       // Don't depend on assumed values.
2011       return llvm::None;
2012     }
2013     if (!SimplifiedV.hasValue()) {
2014       // If it is known (which we tested above) but it doesn't have a value,
2015       // then we can assume `undef` and hence the instruction is UB.
2016       KnownUBInsts.insert(I);
2017       return llvm::None;
2018     }
2019     Value *Val = SimplifiedV.getValue();
2020     if (isa<UndefValue>(Val)) {
2021       KnownUBInsts.insert(I);
2022       return llvm::None;
2023     }
2024     return Val;
2025   }
2026 };
2027 
2028 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2029   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2030       : AAUndefinedBehaviorImpl(IRP, A) {}
2031 
2032   /// See AbstractAttribute::trackStatistics()
2033   void trackStatistics() const override {
2034     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2035                "Number of instructions known to have UB");
2036     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2037         KnownUBInsts.size();
2038   }
2039 };
2040 
2041 /// ------------------------ Will-Return Attributes ----------------------------
2042 
2043 // Helper function that checks whether a function has any cycle which we don't
2044 // know if it is bounded or not.
2045 // Loops with maximum trip count are considered bounded, any other cycle not.
2046 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2047   ScalarEvolution *SE =
2048       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2049   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2050   // If either SCEV or LoopInfo is not available for the function then we assume
2051   // any cycle to be unbounded cycle.
2052   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2053   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2054   if (!SE || !LI) {
2055     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2056       if (SCCI.hasCycle())
2057         return true;
2058     return false;
2059   }
2060 
2061   // If there's irreducible control, the function may contain non-loop cycles.
2062   if (mayContainIrreducibleControl(F, LI))
2063     return true;
2064 
2065   // Any loop that does not have a max trip count is considered unbounded cycle.
2066   for (auto *L : LI->getLoopsInPreorder()) {
2067     if (!SE->getSmallConstantMaxTripCount(L))
2068       return true;
2069   }
2070   return false;
2071 }
2072 
2073 struct AAWillReturnImpl : public AAWillReturn {
2074   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2075       : AAWillReturn(IRP, A) {}
2076 
2077   /// See AbstractAttribute::initialize(...).
2078   void initialize(Attributor &A) override {
2079     AAWillReturn::initialize(A);
2080 
2081     Function *F = getAnchorScope();
2082     if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
2083       indicatePessimisticFixpoint();
2084   }
2085 
2086   /// See AbstractAttribute::updateImpl(...).
2087   ChangeStatus updateImpl(Attributor &A) override {
2088     auto CheckForWillReturn = [&](Instruction &I) {
2089       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2090       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2091       if (WillReturnAA.isKnownWillReturn())
2092         return true;
2093       if (!WillReturnAA.isAssumedWillReturn())
2094         return false;
2095       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2096       return NoRecurseAA.isAssumedNoRecurse();
2097     };
2098 
2099     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2100       return indicatePessimisticFixpoint();
2101 
2102     return ChangeStatus::UNCHANGED;
2103   }
2104 
2105   /// See AbstractAttribute::getAsStr()
2106   const std::string getAsStr() const override {
2107     return getAssumed() ? "willreturn" : "may-noreturn";
2108   }
2109 };
2110 
2111 struct AAWillReturnFunction final : AAWillReturnImpl {
2112   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2113       : AAWillReturnImpl(IRP, A) {}
2114 
2115   /// See AbstractAttribute::trackStatistics()
2116   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2117 };
2118 
2119 /// WillReturn attribute deduction for a call sites.
2120 struct AAWillReturnCallSite final : AAWillReturnImpl {
2121   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2122       : AAWillReturnImpl(IRP, A) {}
2123 
2124   /// See AbstractAttribute::initialize(...).
2125   void initialize(Attributor &A) override {
2126     AAWillReturnImpl::initialize(A);
2127     Function *F = getAssociatedFunction();
2128     if (!F)
2129       indicatePessimisticFixpoint();
2130   }
2131 
2132   /// See AbstractAttribute::updateImpl(...).
2133   ChangeStatus updateImpl(Attributor &A) override {
2134     // TODO: Once we have call site specific value information we can provide
2135     //       call site specific liveness information and then it makes
2136     //       sense to specialize attributes for call sites arguments instead of
2137     //       redirecting requests to the callee argument.
2138     Function *F = getAssociatedFunction();
2139     const IRPosition &FnPos = IRPosition::function(*F);
2140     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2141     return clampStateAndIndicateChange(
2142         getState(),
2143         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2144   }
2145 
2146   /// See AbstractAttribute::trackStatistics()
2147   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2148 };
2149 
2150 /// -------------------AAReachability Attribute--------------------------
2151 
2152 struct AAReachabilityImpl : AAReachability {
2153   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2154       : AAReachability(IRP, A) {}
2155 
2156   const std::string getAsStr() const override {
2157     // TODO: Return the number of reachable queries.
2158     return "reachable";
2159   }
2160 
2161   /// See AbstractAttribute::initialize(...).
2162   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2163 
2164   /// See AbstractAttribute::updateImpl(...).
2165   ChangeStatus updateImpl(Attributor &A) override {
2166     return indicatePessimisticFixpoint();
2167   }
2168 };
2169 
2170 struct AAReachabilityFunction final : public AAReachabilityImpl {
2171   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2172       : AAReachabilityImpl(IRP, A) {}
2173 
2174   /// See AbstractAttribute::trackStatistics()
2175   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2176 };
2177 
2178 /// ------------------------ NoAlias Argument Attribute ------------------------
2179 
2180 struct AANoAliasImpl : AANoAlias {
2181   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2182     assert(getAssociatedType()->isPointerTy() &&
2183            "Noalias is a pointer attribute");
2184   }
2185 
2186   const std::string getAsStr() const override {
2187     return getAssumed() ? "noalias" : "may-alias";
2188   }
2189 };
2190 
2191 /// NoAlias attribute for a floating value.
2192 struct AANoAliasFloating final : AANoAliasImpl {
2193   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2194       : AANoAliasImpl(IRP, A) {}
2195 
2196   /// See AbstractAttribute::initialize(...).
2197   void initialize(Attributor &A) override {
2198     AANoAliasImpl::initialize(A);
2199     Value *Val = &getAssociatedValue();
2200     do {
2201       CastInst *CI = dyn_cast<CastInst>(Val);
2202       if (!CI)
2203         break;
2204       Value *Base = CI->getOperand(0);
2205       if (!Base->hasOneUse())
2206         break;
2207       Val = Base;
2208     } while (true);
2209 
2210     if (!Val->getType()->isPointerTy()) {
2211       indicatePessimisticFixpoint();
2212       return;
2213     }
2214 
2215     if (isa<AllocaInst>(Val))
2216       indicateOptimisticFixpoint();
2217     else if (isa<ConstantPointerNull>(Val) &&
2218              !NullPointerIsDefined(getAnchorScope(),
2219                                    Val->getType()->getPointerAddressSpace()))
2220       indicateOptimisticFixpoint();
2221     else if (Val != &getAssociatedValue()) {
2222       const auto &ValNoAliasAA =
2223           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2224       if (ValNoAliasAA.isKnownNoAlias())
2225         indicateOptimisticFixpoint();
2226     }
2227   }
2228 
2229   /// See AbstractAttribute::updateImpl(...).
2230   ChangeStatus updateImpl(Attributor &A) override {
2231     // TODO: Implement this.
2232     return indicatePessimisticFixpoint();
2233   }
2234 
2235   /// See AbstractAttribute::trackStatistics()
2236   void trackStatistics() const override {
2237     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2238   }
2239 };
2240 
2241 /// NoAlias attribute for an argument.
2242 struct AANoAliasArgument final
2243     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2244   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2245   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2246 
2247   /// See AbstractAttribute::initialize(...).
2248   void initialize(Attributor &A) override {
2249     Base::initialize(A);
2250     // See callsite argument attribute and callee argument attribute.
2251     if (hasAttr({Attribute::ByVal}))
2252       indicateOptimisticFixpoint();
2253   }
2254 
2255   /// See AbstractAttribute::update(...).
2256   ChangeStatus updateImpl(Attributor &A) override {
2257     // We have to make sure no-alias on the argument does not break
2258     // synchronization when this is a callback argument, see also [1] below.
2259     // If synchronization cannot be affected, we delegate to the base updateImpl
2260     // function, otherwise we give up for now.
2261 
2262     // If the function is no-sync, no-alias cannot break synchronization.
2263     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2264         *this, IRPosition::function_scope(getIRPosition()));
2265     if (NoSyncAA.isAssumedNoSync())
2266       return Base::updateImpl(A);
2267 
2268     // If the argument is read-only, no-alias cannot break synchronization.
2269     const auto &MemBehaviorAA =
2270         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2271     if (MemBehaviorAA.isAssumedReadOnly())
2272       return Base::updateImpl(A);
2273 
2274     // If the argument is never passed through callbacks, no-alias cannot break
2275     // synchronization.
2276     bool AllCallSitesKnown;
2277     if (A.checkForAllCallSites(
2278             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2279             true, AllCallSitesKnown))
2280       return Base::updateImpl(A);
2281 
2282     // TODO: add no-alias but make sure it doesn't break synchronization by
2283     // introducing fake uses. See:
2284     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2285     //     International Workshop on OpenMP 2018,
2286     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2287 
2288     return indicatePessimisticFixpoint();
2289   }
2290 
2291   /// See AbstractAttribute::trackStatistics()
2292   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2293 };
2294 
2295 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2296   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2297       : AANoAliasImpl(IRP, A) {}
2298 
2299   /// See AbstractAttribute::initialize(...).
2300   void initialize(Attributor &A) override {
2301     // See callsite argument attribute and callee argument attribute.
2302     const auto &CB = cast<CallBase>(getAnchorValue());
2303     if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias))
2304       indicateOptimisticFixpoint();
2305     Value &Val = getAssociatedValue();
2306     if (isa<ConstantPointerNull>(Val) &&
2307         !NullPointerIsDefined(getAnchorScope(),
2308                               Val.getType()->getPointerAddressSpace()))
2309       indicateOptimisticFixpoint();
2310   }
2311 
2312   /// Determine if the underlying value may alias with the call site argument
2313   /// \p OtherArgNo of \p ICS (= the underlying call site).
2314   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2315                             const AAMemoryBehavior &MemBehaviorAA,
2316                             const CallBase &CB, unsigned OtherArgNo) {
2317     // We do not need to worry about aliasing with the underlying IRP.
2318     if (this->getArgNo() == (int)OtherArgNo)
2319       return false;
2320 
2321     // If it is not a pointer or pointer vector we do not alias.
2322     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2323     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2324       return false;
2325 
2326     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2327         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2328         /* TrackDependence */ false);
2329 
2330     // If the argument is readnone, there is no read-write aliasing.
2331     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2332       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2333       return false;
2334     }
2335 
2336     // If the argument is readonly and the underlying value is readonly, there
2337     // is no read-write aliasing.
2338     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2339     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2340       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2341       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2342       return false;
2343     }
2344 
2345     // We have to utilize actual alias analysis queries so we need the object.
2346     if (!AAR)
2347       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2348 
2349     // Try to rule it out at the call site.
2350     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2351     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2352                          "callsite arguments: "
2353                       << getAssociatedValue() << " " << *ArgOp << " => "
2354                       << (IsAliasing ? "" : "no-") << "alias \n");
2355 
2356     return IsAliasing;
2357   }
2358 
2359   bool
2360   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2361                                          const AAMemoryBehavior &MemBehaviorAA,
2362                                          const AANoAlias &NoAliasAA) {
2363     // We can deduce "noalias" if the following conditions hold.
2364     // (i)   Associated value is assumed to be noalias in the definition.
2365     // (ii)  Associated value is assumed to be no-capture in all the uses
2366     //       possibly executed before this callsite.
2367     // (iii) There is no other pointer argument which could alias with the
2368     //       value.
2369 
2370     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2371     if (!AssociatedValueIsNoAliasAtDef) {
2372       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2373                         << " is not no-alias at the definition\n");
2374       return false;
2375     }
2376 
2377     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2378 
2379     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2380     auto &NoCaptureAA =
2381         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2382     // Check whether the value is captured in the scope using AANoCapture.
2383     //      Look at CFG and check only uses possibly executed before this
2384     //      callsite.
2385     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2386       Instruction *UserI = cast<Instruction>(U.getUser());
2387 
2388       // If user if curr instr and only use.
2389       if (UserI == getCtxI() && UserI->hasOneUse())
2390         return true;
2391 
2392       const Function *ScopeFn = VIRP.getAnchorScope();
2393       if (ScopeFn) {
2394         const auto &ReachabilityAA =
2395             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2396 
2397         if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI()))
2398           return true;
2399 
2400         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2401           if (CB->isArgOperand(&U)) {
2402 
2403             unsigned ArgNo = CB->getArgOperandNo(&U);
2404 
2405             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2406                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2407 
2408             if (NoCaptureAA.isAssumedNoCapture())
2409               return true;
2410           }
2411         }
2412       }
2413 
2414       // For cases which can potentially have more users
2415       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2416           isa<SelectInst>(U)) {
2417         Follow = true;
2418         return true;
2419       }
2420 
2421       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2422       return false;
2423     };
2424 
2425     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2426       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2427         LLVM_DEBUG(
2428             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2429                    << " cannot be noalias as it is potentially captured\n");
2430         return false;
2431       }
2432     }
2433     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2434 
2435     // Check there is no other pointer argument which could alias with the
2436     // value passed at this call site.
2437     // TODO: AbstractCallSite
2438     const auto &CB = cast<CallBase>(getAnchorValue());
2439     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2440          OtherArgNo++)
2441       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2442         return false;
2443 
2444     return true;
2445   }
2446 
2447   /// See AbstractAttribute::updateImpl(...).
2448   ChangeStatus updateImpl(Attributor &A) override {
2449     // If the argument is readnone we are done as there are no accesses via the
2450     // argument.
2451     auto &MemBehaviorAA =
2452         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2453                                      /* TrackDependence */ false);
2454     if (MemBehaviorAA.isAssumedReadNone()) {
2455       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2456       return ChangeStatus::UNCHANGED;
2457     }
2458 
2459     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2460     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2461                                                   /* TrackDependence */ false);
2462 
2463     AAResults *AAR = nullptr;
2464     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2465                                                NoAliasAA)) {
2466       LLVM_DEBUG(
2467           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2468       return ChangeStatus::UNCHANGED;
2469     }
2470 
2471     return indicatePessimisticFixpoint();
2472   }
2473 
2474   /// See AbstractAttribute::trackStatistics()
2475   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2476 };
2477 
2478 /// NoAlias attribute for function return value.
2479 struct AANoAliasReturned final : AANoAliasImpl {
2480   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2481       : AANoAliasImpl(IRP, A) {}
2482 
2483   /// See AbstractAttribute::updateImpl(...).
2484   virtual ChangeStatus updateImpl(Attributor &A) override {
2485 
2486     auto CheckReturnValue = [&](Value &RV) -> bool {
2487       if (Constant *C = dyn_cast<Constant>(&RV))
2488         if (C->isNullValue() || isa<UndefValue>(C))
2489           return true;
2490 
2491       /// For now, we can only deduce noalias if we have call sites.
2492       /// FIXME: add more support.
2493       if (!isa<CallBase>(&RV))
2494         return false;
2495 
2496       const IRPosition &RVPos = IRPosition::value(RV);
2497       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2498       if (!NoAliasAA.isAssumedNoAlias())
2499         return false;
2500 
2501       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2502       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2503     };
2504 
2505     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2506       return indicatePessimisticFixpoint();
2507 
2508     return ChangeStatus::UNCHANGED;
2509   }
2510 
2511   /// See AbstractAttribute::trackStatistics()
2512   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2513 };
2514 
2515 /// NoAlias attribute deduction for a call site return value.
2516 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2517   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2518       : AANoAliasImpl(IRP, A) {}
2519 
2520   /// See AbstractAttribute::initialize(...).
2521   void initialize(Attributor &A) override {
2522     AANoAliasImpl::initialize(A);
2523     Function *F = getAssociatedFunction();
2524     if (!F)
2525       indicatePessimisticFixpoint();
2526   }
2527 
2528   /// See AbstractAttribute::updateImpl(...).
2529   ChangeStatus updateImpl(Attributor &A) override {
2530     // TODO: Once we have call site specific value information we can provide
2531     //       call site specific liveness information and then it makes
2532     //       sense to specialize attributes for call sites arguments instead of
2533     //       redirecting requests to the callee argument.
2534     Function *F = getAssociatedFunction();
2535     const IRPosition &FnPos = IRPosition::returned(*F);
2536     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2537     return clampStateAndIndicateChange(
2538         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2539   }
2540 
2541   /// See AbstractAttribute::trackStatistics()
2542   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2543 };
2544 
2545 /// -------------------AAIsDead Function Attribute-----------------------
2546 
2547 struct AAIsDeadValueImpl : public AAIsDead {
2548   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2549 
2550   /// See AAIsDead::isAssumedDead().
2551   bool isAssumedDead() const override { return getAssumed(); }
2552 
2553   /// See AAIsDead::isKnownDead().
2554   bool isKnownDead() const override { return getKnown(); }
2555 
2556   /// See AAIsDead::isAssumedDead(BasicBlock *).
2557   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2558 
2559   /// See AAIsDead::isKnownDead(BasicBlock *).
2560   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2561 
2562   /// See AAIsDead::isAssumedDead(Instruction *I).
2563   bool isAssumedDead(const Instruction *I) const override {
2564     return I == getCtxI() && isAssumedDead();
2565   }
2566 
2567   /// See AAIsDead::isKnownDead(Instruction *I).
2568   bool isKnownDead(const Instruction *I) const override {
2569     return isAssumedDead(I) && getKnown();
2570   }
2571 
2572   /// See AbstractAttribute::getAsStr().
2573   const std::string getAsStr() const override {
2574     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2575   }
2576 
2577   /// Check if all uses are assumed dead.
2578   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2579     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2580     // Explicitly set the dependence class to required because we want a long
2581     // chain of N dependent instructions to be considered live as soon as one is
2582     // without going through N update cycles. This is not required for
2583     // correctness.
2584     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2585   }
2586 
2587   /// Determine if \p I is assumed to be side-effect free.
2588   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2589     if (!I || wouldInstructionBeTriviallyDead(I))
2590       return true;
2591 
2592     auto *CB = dyn_cast<CallBase>(I);
2593     if (!CB || isa<IntrinsicInst>(CB))
2594       return false;
2595 
2596     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2597     const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(*this, CallIRP);
2598     if (!NoUnwindAA.isAssumedNoUnwind())
2599       return false;
2600 
2601     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2602         *this, CallIRP, /* TrackDependence */ false);
2603     if (MemBehaviorAA.isAssumedReadOnly()) {
2604       if (!MemBehaviorAA.isKnownReadOnly())
2605         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2606       return true;
2607     }
2608     return false;
2609   }
2610 };
2611 
2612 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2613   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2614       : AAIsDeadValueImpl(IRP, A) {}
2615 
2616   /// See AbstractAttribute::initialize(...).
2617   void initialize(Attributor &A) override {
2618     if (isa<UndefValue>(getAssociatedValue())) {
2619       indicatePessimisticFixpoint();
2620       return;
2621     }
2622 
2623     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2624     if (!isAssumedSideEffectFree(A, I))
2625       indicatePessimisticFixpoint();
2626   }
2627 
2628   /// See AbstractAttribute::updateImpl(...).
2629   ChangeStatus updateImpl(Attributor &A) override {
2630     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2631     if (!isAssumedSideEffectFree(A, I))
2632       return indicatePessimisticFixpoint();
2633 
2634     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2635       return indicatePessimisticFixpoint();
2636     return ChangeStatus::UNCHANGED;
2637   }
2638 
2639   /// See AbstractAttribute::manifest(...).
2640   ChangeStatus manifest(Attributor &A) override {
2641     Value &V = getAssociatedValue();
2642     if (auto *I = dyn_cast<Instruction>(&V)) {
2643       // If we get here we basically know the users are all dead. We check if
2644       // isAssumedSideEffectFree returns true here again because it might not be
2645       // the case and only the users are dead but the instruction (=call) is
2646       // still needed.
2647       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2648         A.deleteAfterManifest(*I);
2649         return ChangeStatus::CHANGED;
2650       }
2651     }
2652     if (V.use_empty())
2653       return ChangeStatus::UNCHANGED;
2654 
2655     bool UsedAssumedInformation = false;
2656     Optional<Constant *> C =
2657         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2658     if (C.hasValue() && C.getValue())
2659       return ChangeStatus::UNCHANGED;
2660 
2661     // Replace the value with undef as it is dead but keep droppable uses around
2662     // as they provide information we don't want to give up on just yet.
2663     UndefValue &UV = *UndefValue::get(V.getType());
2664     bool AnyChange =
2665         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2666     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2667   }
2668 
2669   /// See AbstractAttribute::trackStatistics()
2670   void trackStatistics() const override {
2671     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2672   }
2673 };
2674 
2675 struct AAIsDeadArgument : public AAIsDeadFloating {
2676   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2677       : AAIsDeadFloating(IRP, A) {}
2678 
2679   /// See AbstractAttribute::initialize(...).
2680   void initialize(Attributor &A) override {
2681     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2682       indicatePessimisticFixpoint();
2683   }
2684 
2685   /// See AbstractAttribute::manifest(...).
2686   ChangeStatus manifest(Attributor &A) override {
2687     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2688     Argument &Arg = *getAssociatedArgument();
2689     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2690       if (A.registerFunctionSignatureRewrite(
2691               Arg, /* ReplacementTypes */ {},
2692               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2693               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2694         Arg.dropDroppableUses();
2695         return ChangeStatus::CHANGED;
2696       }
2697     return Changed;
2698   }
2699 
2700   /// See AbstractAttribute::trackStatistics()
2701   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2702 };
2703 
2704 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2705   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2706       : AAIsDeadValueImpl(IRP, A) {}
2707 
2708   /// See AbstractAttribute::initialize(...).
2709   void initialize(Attributor &A) override {
2710     if (isa<UndefValue>(getAssociatedValue()))
2711       indicatePessimisticFixpoint();
2712   }
2713 
2714   /// See AbstractAttribute::updateImpl(...).
2715   ChangeStatus updateImpl(Attributor &A) override {
2716     // TODO: Once we have call site specific value information we can provide
2717     //       call site specific liveness information and then it makes
2718     //       sense to specialize attributes for call sites arguments instead of
2719     //       redirecting requests to the callee argument.
2720     Argument *Arg = getAssociatedArgument();
2721     if (!Arg)
2722       return indicatePessimisticFixpoint();
2723     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2724     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2725     return clampStateAndIndicateChange(
2726         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2727   }
2728 
2729   /// See AbstractAttribute::manifest(...).
2730   ChangeStatus manifest(Attributor &A) override {
2731     CallBase &CB = cast<CallBase>(getAnchorValue());
2732     Use &U = CB.getArgOperandUse(getArgNo());
2733     assert(!isa<UndefValue>(U.get()) &&
2734            "Expected undef values to be filtered out!");
2735     UndefValue &UV = *UndefValue::get(U->getType());
2736     if (A.changeUseAfterManifest(U, UV))
2737       return ChangeStatus::CHANGED;
2738     return ChangeStatus::UNCHANGED;
2739   }
2740 
2741   /// See AbstractAttribute::trackStatistics()
2742   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2743 };
2744 
2745 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2746   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2747       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2748 
2749   /// See AAIsDead::isAssumedDead().
2750   bool isAssumedDead() const override {
2751     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2752   }
2753 
2754   /// See AbstractAttribute::initialize(...).
2755   void initialize(Attributor &A) override {
2756     if (isa<UndefValue>(getAssociatedValue())) {
2757       indicatePessimisticFixpoint();
2758       return;
2759     }
2760 
2761     // We track this separately as a secondary state.
2762     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2763   }
2764 
2765   /// See AbstractAttribute::updateImpl(...).
2766   ChangeStatus updateImpl(Attributor &A) override {
2767     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2768     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2769       IsAssumedSideEffectFree = false;
2770       Changed = ChangeStatus::CHANGED;
2771     }
2772 
2773     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2774       return indicatePessimisticFixpoint();
2775     return Changed;
2776   }
2777 
2778   /// See AbstractAttribute::trackStatistics()
2779   void trackStatistics() const override {
2780     if (IsAssumedSideEffectFree)
2781       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2782     else
2783       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2784   }
2785 
2786   /// See AbstractAttribute::getAsStr().
2787   const std::string getAsStr() const override {
2788     return isAssumedDead()
2789                ? "assumed-dead"
2790                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2791   }
2792 
2793 private:
2794   bool IsAssumedSideEffectFree;
2795 };
2796 
2797 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2798   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
2799       : AAIsDeadValueImpl(IRP, A) {}
2800 
2801   /// See AbstractAttribute::updateImpl(...).
2802   ChangeStatus updateImpl(Attributor &A) override {
2803 
2804     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
2805                               {Instruction::Ret});
2806 
2807     auto PredForCallSite = [&](AbstractCallSite ACS) {
2808       if (ACS.isCallbackCall() || !ACS.getInstruction())
2809         return false;
2810       return areAllUsesAssumedDead(A, *ACS.getInstruction());
2811     };
2812 
2813     bool AllCallSitesKnown;
2814     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
2815                                 AllCallSitesKnown))
2816       return indicatePessimisticFixpoint();
2817 
2818     return ChangeStatus::UNCHANGED;
2819   }
2820 
2821   /// See AbstractAttribute::manifest(...).
2822   ChangeStatus manifest(Attributor &A) override {
2823     // TODO: Rewrite the signature to return void?
2824     bool AnyChange = false;
2825     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2826     auto RetInstPred = [&](Instruction &I) {
2827       ReturnInst &RI = cast<ReturnInst>(I);
2828       if (!isa<UndefValue>(RI.getReturnValue()))
2829         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2830       return true;
2831     };
2832     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2833     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2834   }
2835 
2836   /// See AbstractAttribute::trackStatistics()
2837   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2838 };
2839 
2840 struct AAIsDeadFunction : public AAIsDead {
2841   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2842 
2843   /// See AbstractAttribute::initialize(...).
2844   void initialize(Attributor &A) override {
2845     const Function *F = getAnchorScope();
2846     if (F && !F->isDeclaration()) {
2847       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2848       assumeLive(A, F->getEntryBlock());
2849     }
2850   }
2851 
2852   /// See AbstractAttribute::getAsStr().
2853   const std::string getAsStr() const override {
2854     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2855            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
2856            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2857            std::to_string(KnownDeadEnds.size()) + "]";
2858   }
2859 
2860   /// See AbstractAttribute::manifest(...).
2861   ChangeStatus manifest(Attributor &A) override {
2862     assert(getState().isValidState() &&
2863            "Attempted to manifest an invalid state!");
2864 
2865     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2866     Function &F = *getAnchorScope();
2867 
2868     if (AssumedLiveBlocks.empty()) {
2869       A.deleteAfterManifest(F);
2870       return ChangeStatus::CHANGED;
2871     }
2872 
2873     // Flag to determine if we can change an invoke to a call assuming the
2874     // callee is nounwind. This is not possible if the personality of the
2875     // function allows to catch asynchronous exceptions.
2876     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2877 
2878     KnownDeadEnds.set_union(ToBeExploredFrom);
2879     for (const Instruction *DeadEndI : KnownDeadEnds) {
2880       auto *CB = dyn_cast<CallBase>(DeadEndI);
2881       if (!CB)
2882         continue;
2883       const auto &NoReturnAA =
2884           A.getAAFor<AANoReturn>(*this, IRPosition::callsite_function(*CB));
2885       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2886       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2887         continue;
2888 
2889       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
2890         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
2891       else
2892         A.changeToUnreachableAfterManifest(
2893             const_cast<Instruction *>(DeadEndI->getNextNode()));
2894       HasChanged = ChangeStatus::CHANGED;
2895     }
2896 
2897     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
2898     for (BasicBlock &BB : F)
2899       if (!AssumedLiveBlocks.count(&BB)) {
2900         A.deleteAfterManifest(BB);
2901         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
2902       }
2903 
2904     return HasChanged;
2905   }
2906 
2907   /// See AbstractAttribute::updateImpl(...).
2908   ChangeStatus updateImpl(Attributor &A) override;
2909 
2910   /// See AbstractAttribute::trackStatistics()
2911   void trackStatistics() const override {}
2912 
2913   /// Returns true if the function is assumed dead.
2914   bool isAssumedDead() const override { return false; }
2915 
2916   /// See AAIsDead::isKnownDead().
2917   bool isKnownDead() const override { return false; }
2918 
2919   /// See AAIsDead::isAssumedDead(BasicBlock *).
2920   bool isAssumedDead(const BasicBlock *BB) const override {
2921     assert(BB->getParent() == getAnchorScope() &&
2922            "BB must be in the same anchor scope function.");
2923 
2924     if (!getAssumed())
2925       return false;
2926     return !AssumedLiveBlocks.count(BB);
2927   }
2928 
2929   /// See AAIsDead::isKnownDead(BasicBlock *).
2930   bool isKnownDead(const BasicBlock *BB) const override {
2931     return getKnown() && isAssumedDead(BB);
2932   }
2933 
2934   /// See AAIsDead::isAssumed(Instruction *I).
2935   bool isAssumedDead(const Instruction *I) const override {
2936     assert(I->getParent()->getParent() == getAnchorScope() &&
2937            "Instruction must be in the same anchor scope function.");
2938 
2939     if (!getAssumed())
2940       return false;
2941 
2942     // If it is not in AssumedLiveBlocks then it for sure dead.
2943     // Otherwise, it can still be after noreturn call in a live block.
2944     if (!AssumedLiveBlocks.count(I->getParent()))
2945       return true;
2946 
2947     // If it is not after a liveness barrier it is live.
2948     const Instruction *PrevI = I->getPrevNode();
2949     while (PrevI) {
2950       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2951         return true;
2952       PrevI = PrevI->getPrevNode();
2953     }
2954     return false;
2955   }
2956 
2957   /// See AAIsDead::isKnownDead(Instruction *I).
2958   bool isKnownDead(const Instruction *I) const override {
2959     return getKnown() && isAssumedDead(I);
2960   }
2961 
2962   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2963   /// that internal function called from \p BB should now be looked at.
2964   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2965     if (!AssumedLiveBlocks.insert(&BB).second)
2966       return false;
2967 
2968     // We assume that all of BB is (probably) live now and if there are calls to
2969     // internal functions we will assume that those are now live as well. This
2970     // is a performance optimization for blocks with calls to a lot of internal
2971     // functions. It can however cause dead functions to be treated as live.
2972     for (const Instruction &I : BB)
2973       if (const auto *CB = dyn_cast<CallBase>(&I))
2974         if (const Function *F = CB->getCalledFunction())
2975           if (F->hasLocalLinkage())
2976             A.markLiveInternalFunction(*F);
2977     return true;
2978   }
2979 
2980   /// Collection of instructions that need to be explored again, e.g., we
2981   /// did assume they do not transfer control to (one of their) successors.
2982   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
2983 
2984   /// Collection of instructions that are known to not transfer control.
2985   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
2986 
2987   /// Collection of all assumed live BasicBlocks.
2988   DenseSet<const BasicBlock *> AssumedLiveBlocks;
2989 };
2990 
2991 static bool
2992 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
2993                         AbstractAttribute &AA,
2994                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
2995   const IRPosition &IPos = IRPosition::callsite_function(CB);
2996 
2997   const auto &NoReturnAA = A.getAAFor<AANoReturn>(AA, IPos);
2998   if (NoReturnAA.isAssumedNoReturn())
2999     return !NoReturnAA.isKnownNoReturn();
3000   if (CB.isTerminator())
3001     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3002   else
3003     AliveSuccessors.push_back(CB.getNextNode());
3004   return false;
3005 }
3006 
3007 static bool
3008 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3009                         AbstractAttribute &AA,
3010                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3011   bool UsedAssumedInformation =
3012       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3013 
3014   // First, determine if we can change an invoke to a call assuming the
3015   // callee is nounwind. This is not possible if the personality of the
3016   // function allows to catch asynchronous exceptions.
3017   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3018     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3019   } else {
3020     const IRPosition &IPos = IRPosition::callsite_function(II);
3021     const auto &AANoUnw = A.getAAFor<AANoUnwind>(
3022         AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3023     if (AANoUnw.isAssumedNoUnwind()) {
3024       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3025     } else {
3026       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3027     }
3028   }
3029   return UsedAssumedInformation;
3030 }
3031 
3032 static bool
3033 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3034                         AbstractAttribute &AA,
3035                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3036   bool UsedAssumedInformation = false;
3037   if (BI.getNumSuccessors() == 1) {
3038     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3039   } else {
3040     Optional<ConstantInt *> CI = getAssumedConstantInt(
3041         A, *BI.getCondition(), AA, UsedAssumedInformation);
3042     if (!CI.hasValue()) {
3043       // No value yet, assume both edges are dead.
3044     } else if (CI.getValue()) {
3045       const BasicBlock *SuccBB =
3046           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3047       AliveSuccessors.push_back(&SuccBB->front());
3048     } else {
3049       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3050       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3051       UsedAssumedInformation = false;
3052     }
3053   }
3054   return UsedAssumedInformation;
3055 }
3056 
3057 static bool
3058 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3059                         AbstractAttribute &AA,
3060                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3061   bool UsedAssumedInformation = false;
3062   Optional<ConstantInt *> CI =
3063       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3064   if (!CI.hasValue()) {
3065     // No value yet, assume all edges are dead.
3066   } else if (CI.getValue()) {
3067     for (auto &CaseIt : SI.cases()) {
3068       if (CaseIt.getCaseValue() == CI.getValue()) {
3069         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3070         return UsedAssumedInformation;
3071       }
3072     }
3073     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3074     return UsedAssumedInformation;
3075   } else {
3076     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3077       AliveSuccessors.push_back(&SuccBB->front());
3078   }
3079   return UsedAssumedInformation;
3080 }
3081 
3082 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3083   ChangeStatus Change = ChangeStatus::UNCHANGED;
3084 
3085   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3086                     << getAnchorScope()->size() << "] BBs and "
3087                     << ToBeExploredFrom.size() << " exploration points and "
3088                     << KnownDeadEnds.size() << " known dead ends\n");
3089 
3090   // Copy and clear the list of instructions we need to explore from. It is
3091   // refilled with instructions the next update has to look at.
3092   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3093                                                ToBeExploredFrom.end());
3094   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3095 
3096   SmallVector<const Instruction *, 8> AliveSuccessors;
3097   while (!Worklist.empty()) {
3098     const Instruction *I = Worklist.pop_back_val();
3099     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3100 
3101     AliveSuccessors.clear();
3102 
3103     bool UsedAssumedInformation = false;
3104     switch (I->getOpcode()) {
3105     // TODO: look for (assumed) UB to backwards propagate "deadness".
3106     default:
3107       if (I->isTerminator()) {
3108         for (const BasicBlock *SuccBB : successors(I->getParent()))
3109           AliveSuccessors.push_back(&SuccBB->front());
3110       } else {
3111         AliveSuccessors.push_back(I->getNextNode());
3112       }
3113       break;
3114     case Instruction::Call:
3115       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3116                                                        *this, AliveSuccessors);
3117       break;
3118     case Instruction::Invoke:
3119       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3120                                                        *this, AliveSuccessors);
3121       break;
3122     case Instruction::Br:
3123       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3124                                                        *this, AliveSuccessors);
3125       break;
3126     case Instruction::Switch:
3127       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3128                                                        *this, AliveSuccessors);
3129       break;
3130     }
3131 
3132     if (UsedAssumedInformation) {
3133       NewToBeExploredFrom.insert(I);
3134     } else {
3135       Change = ChangeStatus::CHANGED;
3136       if (AliveSuccessors.empty() ||
3137           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3138         KnownDeadEnds.insert(I);
3139     }
3140 
3141     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3142                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3143                       << UsedAssumedInformation << "\n");
3144 
3145     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3146       if (!I->isTerminator()) {
3147         assert(AliveSuccessors.size() == 1 &&
3148                "Non-terminator expected to have a single successor!");
3149         Worklist.push_back(AliveSuccessor);
3150       } else {
3151         if (assumeLive(A, *AliveSuccessor->getParent()))
3152           Worklist.push_back(AliveSuccessor);
3153       }
3154     }
3155   }
3156 
3157   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3158 
3159   // If we know everything is live there is no need to query for liveness.
3160   // Instead, indicating a pessimistic fixpoint will cause the state to be
3161   // "invalid" and all queries to be answered conservatively without lookups.
3162   // To be in this state we have to (1) finished the exploration and (3) not
3163   // discovered any non-trivial dead end and (2) not ruled unreachable code
3164   // dead.
3165   if (ToBeExploredFrom.empty() &&
3166       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3167       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3168         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3169       }))
3170     return indicatePessimisticFixpoint();
3171   return Change;
3172 }
3173 
3174 /// Liveness information for a call sites.
3175 struct AAIsDeadCallSite final : AAIsDeadFunction {
3176   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3177       : AAIsDeadFunction(IRP, A) {}
3178 
3179   /// See AbstractAttribute::initialize(...).
3180   void initialize(Attributor &A) override {
3181     // TODO: Once we have call site specific value information we can provide
3182     //       call site specific liveness information and then it makes
3183     //       sense to specialize attributes for call sites instead of
3184     //       redirecting requests to the callee.
3185     llvm_unreachable("Abstract attributes for liveness are not "
3186                      "supported for call sites yet!");
3187   }
3188 
3189   /// See AbstractAttribute::updateImpl(...).
3190   ChangeStatus updateImpl(Attributor &A) override {
3191     return indicatePessimisticFixpoint();
3192   }
3193 
3194   /// See AbstractAttribute::trackStatistics()
3195   void trackStatistics() const override {}
3196 };
3197 
3198 /// -------------------- Dereferenceable Argument Attribute --------------------
3199 
3200 template <>
3201 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3202                                                      const DerefState &R) {
3203   ChangeStatus CS0 =
3204       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3205   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3206   return CS0 | CS1;
3207 }
3208 
3209 struct AADereferenceableImpl : AADereferenceable {
3210   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3211       : AADereferenceable(IRP, A) {}
3212   using StateType = DerefState;
3213 
3214   /// See AbstractAttribute::initialize(...).
3215   void initialize(Attributor &A) override {
3216     SmallVector<Attribute, 4> Attrs;
3217     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3218              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3219     for (const Attribute &Attr : Attrs)
3220       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3221 
3222     NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition(),
3223                                        /* TrackDependence */ false);
3224 
3225     const IRPosition &IRP = this->getIRPosition();
3226     bool IsFnInterface = IRP.isFnInterfaceKind();
3227     Function *FnScope = IRP.getAnchorScope();
3228     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3229       indicatePessimisticFixpoint();
3230       return;
3231     }
3232 
3233     if (Instruction *CtxI = getCtxI())
3234       followUsesInMBEC(*this, A, getState(), *CtxI);
3235   }
3236 
3237   /// See AbstractAttribute::getState()
3238   /// {
3239   StateType &getState() override { return *this; }
3240   const StateType &getState() const override { return *this; }
3241   /// }
3242 
3243   /// Helper function for collecting accessed bytes in must-be-executed-context
3244   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3245                               DerefState &State) {
3246     const Value *UseV = U->get();
3247     if (!UseV->getType()->isPointerTy())
3248       return;
3249 
3250     Type *PtrTy = UseV->getType();
3251     const DataLayout &DL = A.getDataLayout();
3252     int64_t Offset;
3253     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3254             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3255       if (Base == &getAssociatedValue() &&
3256           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3257         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3258         State.addAccessedBytes(Offset, Size);
3259       }
3260     }
3261     return;
3262   }
3263 
3264   /// See followUsesInMBEC
3265   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3266                        AADereferenceable::StateType &State) {
3267     bool IsNonNull = false;
3268     bool TrackUse = false;
3269     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3270         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3271 
3272     addAccessedBytesForUse(A, U, I, State);
3273     State.takeKnownDerefBytesMaximum(DerefBytes);
3274     return TrackUse;
3275   }
3276 
3277   /// See AbstractAttribute::manifest(...).
3278   ChangeStatus manifest(Attributor &A) override {
3279     ChangeStatus Change = AADereferenceable::manifest(A);
3280     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3281       removeAttrs({Attribute::DereferenceableOrNull});
3282       return ChangeStatus::CHANGED;
3283     }
3284     return Change;
3285   }
3286 
3287   void getDeducedAttributes(LLVMContext &Ctx,
3288                             SmallVectorImpl<Attribute> &Attrs) const override {
3289     // TODO: Add *_globally support
3290     if (isAssumedNonNull())
3291       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3292           Ctx, getAssumedDereferenceableBytes()));
3293     else
3294       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3295           Ctx, getAssumedDereferenceableBytes()));
3296   }
3297 
3298   /// See AbstractAttribute::getAsStr().
3299   const std::string getAsStr() const override {
3300     if (!getAssumedDereferenceableBytes())
3301       return "unknown-dereferenceable";
3302     return std::string("dereferenceable") +
3303            (isAssumedNonNull() ? "" : "_or_null") +
3304            (isAssumedGlobal() ? "_globally" : "") + "<" +
3305            std::to_string(getKnownDereferenceableBytes()) + "-" +
3306            std::to_string(getAssumedDereferenceableBytes()) + ">";
3307   }
3308 };
3309 
3310 /// Dereferenceable attribute for a floating value.
3311 struct AADereferenceableFloating : AADereferenceableImpl {
3312   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3313       : AADereferenceableImpl(IRP, A) {}
3314 
3315   /// See AbstractAttribute::updateImpl(...).
3316   ChangeStatus updateImpl(Attributor &A) override {
3317     const DataLayout &DL = A.getDataLayout();
3318 
3319     auto VisitValueCB = [&](Value &V, const Instruction *, DerefState &T,
3320                             bool Stripped) -> bool {
3321       unsigned IdxWidth =
3322           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3323       APInt Offset(IdxWidth, 0);
3324       const Value *Base =
3325           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3326 
3327       const auto &AA =
3328           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3329       int64_t DerefBytes = 0;
3330       if (!Stripped && this == &AA) {
3331         // Use IR information if we did not strip anything.
3332         // TODO: track globally.
3333         bool CanBeNull;
3334         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3335         T.GlobalState.indicatePessimisticFixpoint();
3336       } else {
3337         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3338         DerefBytes = DS.DerefBytesState.getAssumed();
3339         T.GlobalState &= DS.GlobalState;
3340       }
3341 
3342       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3343 
3344       // For now we do not try to "increase" dereferenceability due to negative
3345       // indices as we first have to come up with code to deal with loops and
3346       // for overflows of the dereferenceable bytes.
3347       int64_t OffsetSExt = Offset.getSExtValue();
3348       if (OffsetSExt < 0)
3349         OffsetSExt = 0;
3350 
3351       T.takeAssumedDerefBytesMinimum(
3352           std::max(int64_t(0), DerefBytes - OffsetSExt));
3353 
3354       if (this == &AA) {
3355         if (!Stripped) {
3356           // If nothing was stripped IR information is all we got.
3357           T.takeKnownDerefBytesMaximum(
3358               std::max(int64_t(0), DerefBytes - OffsetSExt));
3359           T.indicatePessimisticFixpoint();
3360         } else if (OffsetSExt > 0) {
3361           // If something was stripped but there is circular reasoning we look
3362           // for the offset. If it is positive we basically decrease the
3363           // dereferenceable bytes in a circluar loop now, which will simply
3364           // drive them down to the known value in a very slow way which we
3365           // can accelerate.
3366           T.indicatePessimisticFixpoint();
3367         }
3368       }
3369 
3370       return T.isValidState();
3371     };
3372 
3373     DerefState T;
3374     if (!genericValueTraversal<AADereferenceable, DerefState>(
3375             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3376       return indicatePessimisticFixpoint();
3377 
3378     return clampStateAndIndicateChange(getState(), T);
3379   }
3380 
3381   /// See AbstractAttribute::trackStatistics()
3382   void trackStatistics() const override {
3383     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3384   }
3385 };
3386 
3387 /// Dereferenceable attribute for a return value.
3388 struct AADereferenceableReturned final
3389     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3390   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3391       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3392             IRP, A) {}
3393 
3394   /// See AbstractAttribute::trackStatistics()
3395   void trackStatistics() const override {
3396     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3397   }
3398 };
3399 
3400 /// Dereferenceable attribute for an argument
3401 struct AADereferenceableArgument final
3402     : AAArgumentFromCallSiteArguments<AADereferenceable,
3403                                       AADereferenceableImpl> {
3404   using Base =
3405       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3406   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3407       : Base(IRP, A) {}
3408 
3409   /// See AbstractAttribute::trackStatistics()
3410   void trackStatistics() const override {
3411     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3412   }
3413 };
3414 
3415 /// Dereferenceable attribute for a call site argument.
3416 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3417   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3418       : AADereferenceableFloating(IRP, A) {}
3419 
3420   /// See AbstractAttribute::trackStatistics()
3421   void trackStatistics() const override {
3422     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3423   }
3424 };
3425 
3426 /// Dereferenceable attribute deduction for a call site return value.
3427 struct AADereferenceableCallSiteReturned final
3428     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3429   using Base =
3430       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3431   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3432       : Base(IRP, A) {}
3433 
3434   /// See AbstractAttribute::trackStatistics()
3435   void trackStatistics() const override {
3436     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3437   }
3438 };
3439 
3440 // ------------------------ Align Argument Attribute ------------------------
3441 
3442 /// \p Ptr is accessed so we can get alignment information if the ABI requires
3443 /// the element type to be aligned.
3444 static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr,
3445                                                    const DataLayout &DL) {
3446   MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL);
3447   Type *ElementTy = Ptr->getType()->getPointerElementType();
3448   if (ElementTy->isSized())
3449     KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy));
3450   return KnownAlignment;
3451 }
3452 
3453 static unsigned getKnownAlignForUse(Attributor &A,
3454                                     AbstractAttribute &QueryingAA,
3455                                     Value &AssociatedValue, const Use *U,
3456                                     const Instruction *I, bool &TrackUse) {
3457   // We need to follow common pointer manipulation uses to the accesses they
3458   // feed into.
3459   if (isa<CastInst>(I)) {
3460     // Follow all but ptr2int casts.
3461     TrackUse = !isa<PtrToIntInst>(I);
3462     return 0;
3463   }
3464   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3465     if (GEP->hasAllConstantIndices()) {
3466       TrackUse = true;
3467       return 0;
3468     }
3469   }
3470 
3471   MaybeAlign MA;
3472   if (const auto *CB = dyn_cast<CallBase>(I)) {
3473     if (CB->isBundleOperand(U) || CB->isCallee(U))
3474       return 0;
3475 
3476     unsigned ArgNo = CB->getArgOperandNo(U);
3477     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3478     // As long as we only use known information there is no need to track
3479     // dependences here.
3480     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3481                                         /* TrackDependence */ false);
3482     MA = MaybeAlign(AlignAA.getKnownAlign());
3483   }
3484 
3485   const DataLayout &DL = A.getDataLayout();
3486   const Value *UseV = U->get();
3487   if (auto *SI = dyn_cast<StoreInst>(I)) {
3488     if (SI->getPointerOperand() == UseV) {
3489       if (unsigned SIAlign = SI->getAlignment())
3490         MA = MaybeAlign(SIAlign);
3491       else
3492         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3493     }
3494   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3495     if (LI->getPointerOperand() == UseV) {
3496       if (unsigned LIAlign = LI->getAlignment())
3497         MA = MaybeAlign(LIAlign);
3498       else
3499         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3500     }
3501   }
3502 
3503   if (!MA.hasValue() || MA <= 1)
3504     return 0;
3505 
3506   unsigned Alignment = MA->value();
3507   int64_t Offset;
3508 
3509   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3510     if (Base == &AssociatedValue) {
3511       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3512       // So we can say that the maximum power of two which is a divisor of
3513       // gcd(Offset, Alignment) is an alignment.
3514 
3515       uint32_t gcd =
3516           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3517       Alignment = llvm::PowerOf2Floor(gcd);
3518     }
3519   }
3520 
3521   return Alignment;
3522 }
3523 
3524 struct AAAlignImpl : AAAlign {
3525   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3526 
3527   /// See AbstractAttribute::initialize(...).
3528   void initialize(Attributor &A) override {
3529     SmallVector<Attribute, 4> Attrs;
3530     getAttrs({Attribute::Alignment}, Attrs);
3531     for (const Attribute &Attr : Attrs)
3532       takeKnownMaximum(Attr.getValueAsInt());
3533 
3534     if (getIRPosition().isFnInterfaceKind() &&
3535         (!getAnchorScope() ||
3536          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3537       indicatePessimisticFixpoint();
3538       return;
3539     }
3540 
3541     if (Instruction *CtxI = getCtxI())
3542       followUsesInMBEC(*this, A, getState(), *CtxI);
3543   }
3544 
3545   /// See AbstractAttribute::manifest(...).
3546   ChangeStatus manifest(Attributor &A) override {
3547     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3548 
3549     // Check for users that allow alignment annotations.
3550     Value &AssociatedValue = getAssociatedValue();
3551     for (const Use &U : AssociatedValue.uses()) {
3552       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3553         if (SI->getPointerOperand() == &AssociatedValue)
3554           if (SI->getAlignment() < getAssumedAlign()) {
3555             STATS_DECLTRACK(AAAlign, Store,
3556                             "Number of times alignment added to a store");
3557             SI->setAlignment(Align(getAssumedAlign()));
3558             LoadStoreChanged = ChangeStatus::CHANGED;
3559           }
3560       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3561         if (LI->getPointerOperand() == &AssociatedValue)
3562           if (LI->getAlignment() < getAssumedAlign()) {
3563             LI->setAlignment(Align(getAssumedAlign()));
3564             STATS_DECLTRACK(AAAlign, Load,
3565                             "Number of times alignment added to a load");
3566             LoadStoreChanged = ChangeStatus::CHANGED;
3567           }
3568       }
3569     }
3570 
3571     ChangeStatus Changed = AAAlign::manifest(A);
3572 
3573     MaybeAlign InheritAlign =
3574         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3575     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3576       return LoadStoreChanged;
3577     return Changed | LoadStoreChanged;
3578   }
3579 
3580   // TODO: Provide a helper to determine the implied ABI alignment and check in
3581   //       the existing manifest method and a new one for AAAlignImpl that value
3582   //       to avoid making the alignment explicit if it did not improve.
3583 
3584   /// See AbstractAttribute::getDeducedAttributes
3585   virtual void
3586   getDeducedAttributes(LLVMContext &Ctx,
3587                        SmallVectorImpl<Attribute> &Attrs) const override {
3588     if (getAssumedAlign() > 1)
3589       Attrs.emplace_back(
3590           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3591   }
3592 
3593   /// See followUsesInMBEC
3594   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3595                        AAAlign::StateType &State) {
3596     bool TrackUse = false;
3597 
3598     unsigned int KnownAlign =
3599         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3600     State.takeKnownMaximum(KnownAlign);
3601 
3602     return TrackUse;
3603   }
3604 
3605   /// See AbstractAttribute::getAsStr().
3606   const std::string getAsStr() const override {
3607     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3608                                 "-" + std::to_string(getAssumedAlign()) + ">")
3609                              : "unknown-align";
3610   }
3611 };
3612 
3613 /// Align attribute for a floating value.
3614 struct AAAlignFloating : AAAlignImpl {
3615   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3616 
3617   /// See AbstractAttribute::updateImpl(...).
3618   ChangeStatus updateImpl(Attributor &A) override {
3619     const DataLayout &DL = A.getDataLayout();
3620 
3621     auto VisitValueCB = [&](Value &V, const Instruction *,
3622                             AAAlign::StateType &T, bool Stripped) -> bool {
3623       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3624       if (!Stripped && this == &AA) {
3625         // Use only IR information if we did not strip anything.
3626         const MaybeAlign PA = V.getPointerAlignment(DL);
3627         T.takeKnownMaximum(PA ? PA->value() : 0);
3628         T.indicatePessimisticFixpoint();
3629       } else {
3630         // Use abstract attribute information.
3631         const AAAlign::StateType &DS =
3632             static_cast<const AAAlign::StateType &>(AA.getState());
3633         T ^= DS;
3634       }
3635       return T.isValidState();
3636     };
3637 
3638     StateType T;
3639     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3640                                                    VisitValueCB, getCtxI()))
3641       return indicatePessimisticFixpoint();
3642 
3643     // TODO: If we know we visited all incoming values, thus no are assumed
3644     // dead, we can take the known information from the state T.
3645     return clampStateAndIndicateChange(getState(), T);
3646   }
3647 
3648   /// See AbstractAttribute::trackStatistics()
3649   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3650 };
3651 
3652 /// Align attribute for function return value.
3653 struct AAAlignReturned final
3654     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3655   AAAlignReturned(const IRPosition &IRP, Attributor &A)
3656       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {}
3657 
3658   /// See AbstractAttribute::trackStatistics()
3659   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3660 };
3661 
3662 /// Align attribute for function argument.
3663 struct AAAlignArgument final
3664     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3665   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3666   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3667 
3668   /// See AbstractAttribute::manifest(...).
3669   ChangeStatus manifest(Attributor &A) override {
3670     // If the associated argument is involved in a must-tail call we give up
3671     // because we would need to keep the argument alignments of caller and
3672     // callee in-sync. Just does not seem worth the trouble right now.
3673     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3674       return ChangeStatus::UNCHANGED;
3675     return Base::manifest(A);
3676   }
3677 
3678   /// See AbstractAttribute::trackStatistics()
3679   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3680 };
3681 
3682 struct AAAlignCallSiteArgument final : AAAlignFloating {
3683   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3684       : AAAlignFloating(IRP, A) {}
3685 
3686   /// See AbstractAttribute::manifest(...).
3687   ChangeStatus manifest(Attributor &A) override {
3688     // If the associated argument is involved in a must-tail call we give up
3689     // because we would need to keep the argument alignments of caller and
3690     // callee in-sync. Just does not seem worth the trouble right now.
3691     if (Argument *Arg = getAssociatedArgument())
3692       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3693         return ChangeStatus::UNCHANGED;
3694     ChangeStatus Changed = AAAlignImpl::manifest(A);
3695     MaybeAlign InheritAlign =
3696         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3697     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3698       Changed = ChangeStatus::UNCHANGED;
3699     return Changed;
3700   }
3701 
3702   /// See AbstractAttribute::updateImpl(Attributor &A).
3703   ChangeStatus updateImpl(Attributor &A) override {
3704     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3705     if (Argument *Arg = getAssociatedArgument()) {
3706       // We only take known information from the argument
3707       // so we do not need to track a dependence.
3708       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3709           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3710       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3711     }
3712     return Changed;
3713   }
3714 
3715   /// See AbstractAttribute::trackStatistics()
3716   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3717 };
3718 
3719 /// Align attribute deduction for a call site return value.
3720 struct AAAlignCallSiteReturned final
3721     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3722   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3723   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3724       : Base(IRP, A) {}
3725 
3726   /// See AbstractAttribute::initialize(...).
3727   void initialize(Attributor &A) override {
3728     Base::initialize(A);
3729     Function *F = getAssociatedFunction();
3730     if (!F)
3731       indicatePessimisticFixpoint();
3732   }
3733 
3734   /// See AbstractAttribute::trackStatistics()
3735   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3736 };
3737 
3738 /// ------------------ Function No-Return Attribute ----------------------------
3739 struct AANoReturnImpl : public AANoReturn {
3740   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3741 
3742   /// See AbstractAttribute::initialize(...).
3743   void initialize(Attributor &A) override {
3744     AANoReturn::initialize(A);
3745     Function *F = getAssociatedFunction();
3746     if (!F)
3747       indicatePessimisticFixpoint();
3748   }
3749 
3750   /// See AbstractAttribute::getAsStr().
3751   const std::string getAsStr() const override {
3752     return getAssumed() ? "noreturn" : "may-return";
3753   }
3754 
3755   /// See AbstractAttribute::updateImpl(Attributor &A).
3756   virtual ChangeStatus updateImpl(Attributor &A) override {
3757     auto CheckForNoReturn = [](Instruction &) { return false; };
3758     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3759                                    {(unsigned)Instruction::Ret}))
3760       return indicatePessimisticFixpoint();
3761     return ChangeStatus::UNCHANGED;
3762   }
3763 };
3764 
3765 struct AANoReturnFunction final : AANoReturnImpl {
3766   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
3767       : AANoReturnImpl(IRP, A) {}
3768 
3769   /// See AbstractAttribute::trackStatistics()
3770   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3771 };
3772 
3773 /// NoReturn attribute deduction for a call sites.
3774 struct AANoReturnCallSite final : AANoReturnImpl {
3775   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
3776       : AANoReturnImpl(IRP, A) {}
3777 
3778   /// See AbstractAttribute::updateImpl(...).
3779   ChangeStatus updateImpl(Attributor &A) override {
3780     // TODO: Once we have call site specific value information we can provide
3781     //       call site specific liveness information and then it makes
3782     //       sense to specialize attributes for call sites arguments instead of
3783     //       redirecting requests to the callee argument.
3784     Function *F = getAssociatedFunction();
3785     const IRPosition &FnPos = IRPosition::function(*F);
3786     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3787     return clampStateAndIndicateChange(
3788         getState(),
3789         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3790   }
3791 
3792   /// See AbstractAttribute::trackStatistics()
3793   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3794 };
3795 
3796 /// ----------------------- Variable Capturing ---------------------------------
3797 
3798 /// A class to hold the state of for no-capture attributes.
3799 struct AANoCaptureImpl : public AANoCapture {
3800   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
3801 
3802   /// See AbstractAttribute::initialize(...).
3803   void initialize(Attributor &A) override {
3804     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3805       indicateOptimisticFixpoint();
3806       return;
3807     }
3808     Function *AnchorScope = getAnchorScope();
3809     if (isFnInterfaceKind() &&
3810         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3811       indicatePessimisticFixpoint();
3812       return;
3813     }
3814 
3815     // You cannot "capture" null in the default address space.
3816     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3817         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3818       indicateOptimisticFixpoint();
3819       return;
3820     }
3821 
3822     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3823 
3824     // Check what state the associated function can actually capture.
3825     if (F)
3826       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3827     else
3828       indicatePessimisticFixpoint();
3829   }
3830 
3831   /// See AbstractAttribute::updateImpl(...).
3832   ChangeStatus updateImpl(Attributor &A) override;
3833 
3834   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3835   virtual void
3836   getDeducedAttributes(LLVMContext &Ctx,
3837                        SmallVectorImpl<Attribute> &Attrs) const override {
3838     if (!isAssumedNoCaptureMaybeReturned())
3839       return;
3840 
3841     if (getArgNo() >= 0) {
3842       if (isAssumedNoCapture())
3843         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3844       else if (ManifestInternal)
3845         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3846     }
3847   }
3848 
3849   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3850   /// depending on the ability of the function associated with \p IRP to capture
3851   /// state in memory and through "returning/throwing", respectively.
3852   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3853                                                    const Function &F,
3854                                                    BitIntegerState &State) {
3855     // TODO: Once we have memory behavior attributes we should use them here.
3856 
3857     // If we know we cannot communicate or write to memory, we do not care about
3858     // ptr2int anymore.
3859     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3860         F.getReturnType()->isVoidTy()) {
3861       State.addKnownBits(NO_CAPTURE);
3862       return;
3863     }
3864 
3865     // A function cannot capture state in memory if it only reads memory, it can
3866     // however return/throw state and the state might be influenced by the
3867     // pointer value, e.g., loading from a returned pointer might reveal a bit.
3868     if (F.onlyReadsMemory())
3869       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3870 
3871     // A function cannot communicate state back if it does not through
3872     // exceptions and doesn not return values.
3873     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3874       State.addKnownBits(NOT_CAPTURED_IN_RET);
3875 
3876     // Check existing "returned" attributes.
3877     int ArgNo = IRP.getArgNo();
3878     if (F.doesNotThrow() && ArgNo >= 0) {
3879       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3880         if (F.hasParamAttribute(u, Attribute::Returned)) {
3881           if (u == unsigned(ArgNo))
3882             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3883           else if (F.onlyReadsMemory())
3884             State.addKnownBits(NO_CAPTURE);
3885           else
3886             State.addKnownBits(NOT_CAPTURED_IN_RET);
3887           break;
3888         }
3889     }
3890   }
3891 
3892   /// See AbstractState::getAsStr().
3893   const std::string getAsStr() const override {
3894     if (isKnownNoCapture())
3895       return "known not-captured";
3896     if (isAssumedNoCapture())
3897       return "assumed not-captured";
3898     if (isKnownNoCaptureMaybeReturned())
3899       return "known not-captured-maybe-returned";
3900     if (isAssumedNoCaptureMaybeReturned())
3901       return "assumed not-captured-maybe-returned";
3902     return "assumed-captured";
3903   }
3904 };
3905 
3906 /// Attributor-aware capture tracker.
3907 struct AACaptureUseTracker final : public CaptureTracker {
3908 
3909   /// Create a capture tracker that can lookup in-flight abstract attributes
3910   /// through the Attributor \p A.
3911   ///
3912   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3913   /// search is stopped. If a use leads to a return instruction,
3914   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3915   /// If a use leads to a ptr2int which may capture the value,
3916   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3917   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3918   /// set. All values in \p PotentialCopies are later tracked as well. For every
3919   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3920   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3921   /// conservatively set to true.
3922   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3923                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3924                       SmallVectorImpl<const Value *> &PotentialCopies,
3925                       unsigned &RemainingUsesToExplore)
3926       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3927         PotentialCopies(PotentialCopies),
3928         RemainingUsesToExplore(RemainingUsesToExplore) {}
3929 
3930   /// Determine if \p V maybe captured. *Also updates the state!*
3931   bool valueMayBeCaptured(const Value *V) {
3932     if (V->getType()->isPointerTy()) {
3933       PointerMayBeCaptured(V, this);
3934     } else {
3935       State.indicatePessimisticFixpoint();
3936     }
3937     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3938   }
3939 
3940   /// See CaptureTracker::tooManyUses().
3941   void tooManyUses() override {
3942     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3943   }
3944 
3945   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3946     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3947       return true;
3948     const auto &DerefAA = A.getAAFor<AADereferenceable>(
3949         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
3950         DepClassTy::OPTIONAL);
3951     return DerefAA.getAssumedDereferenceableBytes();
3952   }
3953 
3954   /// See CaptureTracker::captured(...).
3955   bool captured(const Use *U) override {
3956     Instruction *UInst = cast<Instruction>(U->getUser());
3957     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3958                       << "\n");
3959 
3960     // Because we may reuse the tracker multiple times we keep track of the
3961     // number of explored uses ourselves as well.
3962     if (RemainingUsesToExplore-- == 0) {
3963       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
3964       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3965                           /* Return */ true);
3966     }
3967 
3968     // Deal with ptr2int by following uses.
3969     if (isa<PtrToIntInst>(UInst)) {
3970       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
3971       return valueMayBeCaptured(UInst);
3972     }
3973 
3974     // Explicitly catch return instructions.
3975     if (isa<ReturnInst>(UInst))
3976       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3977                           /* Return */ true);
3978 
3979     // For now we only use special logic for call sites. However, the tracker
3980     // itself knows about a lot of other non-capturing cases already.
3981     auto *CB = dyn_cast<CallBase>(UInst);
3982     if (!CB || !CB->isArgOperand(U))
3983       return isCapturedIn(/* Memory */ true, /* Integer */ true,
3984                           /* Return */ true);
3985 
3986     unsigned ArgNo = CB->getArgOperandNo(U);
3987     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
3988     // If we have a abstract no-capture attribute for the argument we can use
3989     // it to justify a non-capture attribute here. This allows recursion!
3990     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
3991     if (ArgNoCaptureAA.isAssumedNoCapture())
3992       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3993                           /* Return */ false);
3994     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3995       addPotentialCopy(*CB);
3996       return isCapturedIn(/* Memory */ false, /* Integer */ false,
3997                           /* Return */ false);
3998     }
3999 
4000     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4001     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4002                         /* Return */ true);
4003   }
4004 
4005   /// Register \p CS as potential copy of the value we are checking.
4006   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4007 
4008   /// See CaptureTracker::shouldExplore(...).
4009   bool shouldExplore(const Use *U) override {
4010     // Check liveness and ignore droppable users.
4011     return !U->getUser()->isDroppable() &&
4012            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4013   }
4014 
4015   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4016   /// \p CapturedInRet, then return the appropriate value for use in the
4017   /// CaptureTracker::captured() interface.
4018   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4019                     bool CapturedInRet) {
4020     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4021                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4022     if (CapturedInMem)
4023       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4024     if (CapturedInInt)
4025       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4026     if (CapturedInRet)
4027       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4028     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4029   }
4030 
4031 private:
4032   /// The attributor providing in-flight abstract attributes.
4033   Attributor &A;
4034 
4035   /// The abstract attribute currently updated.
4036   AANoCapture &NoCaptureAA;
4037 
4038   /// The abstract liveness state.
4039   const AAIsDead &IsDeadAA;
4040 
4041   /// The state currently updated.
4042   AANoCapture::StateType &State;
4043 
4044   /// Set of potential copies of the tracked value.
4045   SmallVectorImpl<const Value *> &PotentialCopies;
4046 
4047   /// Global counter to limit the number of explored uses.
4048   unsigned &RemainingUsesToExplore;
4049 };
4050 
4051 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4052   const IRPosition &IRP = getIRPosition();
4053   const Value *V =
4054       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4055   if (!V)
4056     return indicatePessimisticFixpoint();
4057 
4058   const Function *F =
4059       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4060   assert(F && "Expected a function!");
4061   const IRPosition &FnPos = IRPosition::function(*F);
4062   const auto &IsDeadAA =
4063       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4064 
4065   AANoCapture::StateType T;
4066 
4067   // Readonly means we cannot capture through memory.
4068   const auto &FnMemAA =
4069       A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false);
4070   if (FnMemAA.isAssumedReadOnly()) {
4071     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4072     if (FnMemAA.isKnownReadOnly())
4073       addKnownBits(NOT_CAPTURED_IN_MEM);
4074     else
4075       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4076   }
4077 
4078   // Make sure all returned values are different than the underlying value.
4079   // TODO: we could do this in a more sophisticated way inside
4080   //       AAReturnedValues, e.g., track all values that escape through returns
4081   //       directly somehow.
4082   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4083     bool SeenConstant = false;
4084     for (auto &It : RVAA.returned_values()) {
4085       if (isa<Constant>(It.first)) {
4086         if (SeenConstant)
4087           return false;
4088         SeenConstant = true;
4089       } else if (!isa<Argument>(It.first) ||
4090                  It.first == getAssociatedArgument())
4091         return false;
4092     }
4093     return true;
4094   };
4095 
4096   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4097       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4098   if (NoUnwindAA.isAssumedNoUnwind()) {
4099     bool IsVoidTy = F->getReturnType()->isVoidTy();
4100     const AAReturnedValues *RVAA =
4101         IsVoidTy ? nullptr
4102                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4103                                                  /* TrackDependence */ true,
4104                                                  DepClassTy::OPTIONAL);
4105     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4106       T.addKnownBits(NOT_CAPTURED_IN_RET);
4107       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4108         return ChangeStatus::UNCHANGED;
4109       if (NoUnwindAA.isKnownNoUnwind() &&
4110           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4111         addKnownBits(NOT_CAPTURED_IN_RET);
4112         if (isKnown(NOT_CAPTURED_IN_MEM))
4113           return indicateOptimisticFixpoint();
4114       }
4115     }
4116   }
4117 
4118   // Use the CaptureTracker interface and logic with the specialized tracker,
4119   // defined in AACaptureUseTracker, that can look at in-flight abstract
4120   // attributes and directly updates the assumed state.
4121   SmallVector<const Value *, 4> PotentialCopies;
4122   unsigned RemainingUsesToExplore =
4123       getDefaultMaxUsesToExploreForCaptureTracking();
4124   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4125                               RemainingUsesToExplore);
4126 
4127   // Check all potential copies of the associated value until we can assume
4128   // none will be captured or we have to assume at least one might be.
4129   unsigned Idx = 0;
4130   PotentialCopies.push_back(V);
4131   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4132     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4133 
4134   AANoCapture::StateType &S = getState();
4135   auto Assumed = S.getAssumed();
4136   S.intersectAssumedBits(T.getAssumed());
4137   if (!isAssumedNoCaptureMaybeReturned())
4138     return indicatePessimisticFixpoint();
4139   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4140                                    : ChangeStatus::CHANGED;
4141 }
4142 
4143 /// NoCapture attribute for function arguments.
4144 struct AANoCaptureArgument final : AANoCaptureImpl {
4145   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4146       : AANoCaptureImpl(IRP, A) {}
4147 
4148   /// See AbstractAttribute::trackStatistics()
4149   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4150 };
4151 
4152 /// NoCapture attribute for call site arguments.
4153 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4154   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4155       : AANoCaptureImpl(IRP, A) {}
4156 
4157   /// See AbstractAttribute::initialize(...).
4158   void initialize(Attributor &A) override {
4159     if (Argument *Arg = getAssociatedArgument())
4160       if (Arg->hasByValAttr())
4161         indicateOptimisticFixpoint();
4162     AANoCaptureImpl::initialize(A);
4163   }
4164 
4165   /// See AbstractAttribute::updateImpl(...).
4166   ChangeStatus updateImpl(Attributor &A) override {
4167     // TODO: Once we have call site specific value information we can provide
4168     //       call site specific liveness information and then it makes
4169     //       sense to specialize attributes for call sites arguments instead of
4170     //       redirecting requests to the callee argument.
4171     Argument *Arg = getAssociatedArgument();
4172     if (!Arg)
4173       return indicatePessimisticFixpoint();
4174     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4175     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4176     return clampStateAndIndicateChange(
4177         getState(),
4178         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4179   }
4180 
4181   /// See AbstractAttribute::trackStatistics()
4182   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4183 };
4184 
4185 /// NoCapture attribute for floating values.
4186 struct AANoCaptureFloating final : AANoCaptureImpl {
4187   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4188       : AANoCaptureImpl(IRP, A) {}
4189 
4190   /// See AbstractAttribute::trackStatistics()
4191   void trackStatistics() const override {
4192     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4193   }
4194 };
4195 
4196 /// NoCapture attribute for function return value.
4197 struct AANoCaptureReturned final : AANoCaptureImpl {
4198   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4199       : AANoCaptureImpl(IRP, A) {
4200     llvm_unreachable("NoCapture is not applicable to function returns!");
4201   }
4202 
4203   /// See AbstractAttribute::initialize(...).
4204   void initialize(Attributor &A) override {
4205     llvm_unreachable("NoCapture is not applicable to function returns!");
4206   }
4207 
4208   /// See AbstractAttribute::updateImpl(...).
4209   ChangeStatus updateImpl(Attributor &A) override {
4210     llvm_unreachable("NoCapture is not applicable to function returns!");
4211   }
4212 
4213   /// See AbstractAttribute::trackStatistics()
4214   void trackStatistics() const override {}
4215 };
4216 
4217 /// NoCapture attribute deduction for a call site return value.
4218 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4219   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4220       : AANoCaptureImpl(IRP, A) {}
4221 
4222   /// See AbstractAttribute::trackStatistics()
4223   void trackStatistics() const override {
4224     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4225   }
4226 };
4227 
4228 /// ------------------ Value Simplify Attribute ----------------------------
4229 struct AAValueSimplifyImpl : AAValueSimplify {
4230   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4231       : AAValueSimplify(IRP, A) {}
4232 
4233   /// See AbstractAttribute::initialize(...).
4234   void initialize(Attributor &A) override {
4235     if (getAssociatedValue().getType()->isVoidTy())
4236       indicatePessimisticFixpoint();
4237   }
4238 
4239   /// See AbstractAttribute::getAsStr().
4240   const std::string getAsStr() const override {
4241     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4242                         : "not-simple";
4243   }
4244 
4245   /// See AbstractAttribute::trackStatistics()
4246   void trackStatistics() const override {}
4247 
4248   /// See AAValueSimplify::getAssumedSimplifiedValue()
4249   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4250     if (!getAssumed())
4251       return const_cast<Value *>(&getAssociatedValue());
4252     return SimplifiedAssociatedValue;
4253   }
4254 
4255   /// Helper function for querying AAValueSimplify and updating candicate.
4256   /// \param QueryingValue Value trying to unify with SimplifiedValue
4257   /// \param AccumulatedSimplifiedValue Current simplification result.
4258   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4259                              Value &QueryingValue,
4260                              Optional<Value *> &AccumulatedSimplifiedValue) {
4261     // FIXME: Add a typecast support.
4262 
4263     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4264         QueryingAA, IRPosition::value(QueryingValue));
4265 
4266     Optional<Value *> QueryingValueSimplified =
4267         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4268 
4269     if (!QueryingValueSimplified.hasValue())
4270       return true;
4271 
4272     if (!QueryingValueSimplified.getValue())
4273       return false;
4274 
4275     Value &QueryingValueSimplifiedUnwrapped =
4276         *QueryingValueSimplified.getValue();
4277 
4278     if (AccumulatedSimplifiedValue.hasValue() &&
4279         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4280         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4281       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4282     if (AccumulatedSimplifiedValue.hasValue() &&
4283         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4284       return true;
4285 
4286     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4287                       << " is assumed to be "
4288                       << QueryingValueSimplifiedUnwrapped << "\n");
4289 
4290     AccumulatedSimplifiedValue = QueryingValueSimplified;
4291     return true;
4292   }
4293 
4294   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4295     if (!getAssociatedValue().getType()->isIntegerTy())
4296       return false;
4297 
4298     const auto &ValueConstantRangeAA =
4299         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4300 
4301     Optional<ConstantInt *> COpt =
4302         ValueConstantRangeAA.getAssumedConstantInt(A);
4303     if (COpt.hasValue()) {
4304       if (auto *C = COpt.getValue())
4305         SimplifiedAssociatedValue = C;
4306       else
4307         return false;
4308     } else {
4309       SimplifiedAssociatedValue = llvm::None;
4310     }
4311     return true;
4312   }
4313 
4314   /// See AbstractAttribute::manifest(...).
4315   ChangeStatus manifest(Attributor &A) override {
4316     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4317 
4318     if (SimplifiedAssociatedValue.hasValue() &&
4319         !SimplifiedAssociatedValue.getValue())
4320       return Changed;
4321 
4322     Value &V = getAssociatedValue();
4323     auto *C = SimplifiedAssociatedValue.hasValue()
4324                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4325                   : UndefValue::get(V.getType());
4326     if (C) {
4327       // We can replace the AssociatedValue with the constant.
4328       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4329         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4330                           << " :: " << *this << "\n");
4331         if (A.changeValueAfterManifest(V, *C))
4332           Changed = ChangeStatus::CHANGED;
4333       }
4334     }
4335 
4336     return Changed | AAValueSimplify::manifest(A);
4337   }
4338 
4339   /// See AbstractState::indicatePessimisticFixpoint(...).
4340   ChangeStatus indicatePessimisticFixpoint() override {
4341     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4342     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4343     SimplifiedAssociatedValue = &getAssociatedValue();
4344     indicateOptimisticFixpoint();
4345     return ChangeStatus::CHANGED;
4346   }
4347 
4348 protected:
4349   // An assumed simplified value. Initially, it is set to Optional::None, which
4350   // means that the value is not clear under current assumption. If in the
4351   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4352   // returns orignal associated value.
4353   Optional<Value *> SimplifiedAssociatedValue;
4354 };
4355 
4356 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4357   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4358       : AAValueSimplifyImpl(IRP, A) {}
4359 
4360   void initialize(Attributor &A) override {
4361     AAValueSimplifyImpl::initialize(A);
4362     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4363       indicatePessimisticFixpoint();
4364     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4365                 /* IgnoreSubsumingPositions */ true))
4366       indicatePessimisticFixpoint();
4367 
4368     // FIXME: This is a hack to prevent us from propagating function poiner in
4369     // the new pass manager CGSCC pass as it creates call edges the
4370     // CallGraphUpdater cannot handle yet.
4371     Value &V = getAssociatedValue();
4372     if (V.getType()->isPointerTy() &&
4373         V.getType()->getPointerElementType()->isFunctionTy() &&
4374         !A.isModulePass())
4375       indicatePessimisticFixpoint();
4376   }
4377 
4378   /// See AbstractAttribute::updateImpl(...).
4379   ChangeStatus updateImpl(Attributor &A) override {
4380     // Byval is only replacable if it is readonly otherwise we would write into
4381     // the replaced value and not the copy that byval creates implicitly.
4382     Argument *Arg = getAssociatedArgument();
4383     if (Arg->hasByValAttr()) {
4384       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4385       //       there is no race by not copying a constant byval.
4386       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4387       if (!MemAA.isAssumedReadOnly())
4388         return indicatePessimisticFixpoint();
4389     }
4390 
4391     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4392 
4393     auto PredForCallSite = [&](AbstractCallSite ACS) {
4394       const IRPosition &ACSArgPos =
4395           IRPosition::callsite_argument(ACS, getArgNo());
4396       // Check if a coresponding argument was found or if it is on not
4397       // associated (which can happen for callback calls).
4398       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4399         return false;
4400 
4401       // We can only propagate thread independent values through callbacks.
4402       // This is different to direct/indirect call sites because for them we
4403       // know the thread executing the caller and callee is the same. For
4404       // callbacks this is not guaranteed, thus a thread dependent value could
4405       // be different for the caller and callee, making it invalid to propagate.
4406       Value &ArgOp = ACSArgPos.getAssociatedValue();
4407       if (ACS.isCallbackCall())
4408         if (auto *C = dyn_cast<Constant>(&ArgOp))
4409           if (C->isThreadDependent())
4410             return false;
4411       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4412     };
4413 
4414     bool AllCallSitesKnown;
4415     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4416                                 AllCallSitesKnown))
4417       if (!askSimplifiedValueForAAValueConstantRange(A))
4418         return indicatePessimisticFixpoint();
4419 
4420     // If a candicate was found in this update, return CHANGED.
4421     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4422                ? ChangeStatus::UNCHANGED
4423                : ChangeStatus ::CHANGED;
4424   }
4425 
4426   /// See AbstractAttribute::trackStatistics()
4427   void trackStatistics() const override {
4428     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4429   }
4430 };
4431 
4432 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4433   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4434       : AAValueSimplifyImpl(IRP, A) {}
4435 
4436   /// See AbstractAttribute::updateImpl(...).
4437   ChangeStatus updateImpl(Attributor &A) override {
4438     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4439 
4440     auto PredForReturned = [&](Value &V) {
4441       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4442     };
4443 
4444     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4445       if (!askSimplifiedValueForAAValueConstantRange(A))
4446         return indicatePessimisticFixpoint();
4447 
4448     // If a candicate was found in this update, return CHANGED.
4449     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4450                ? ChangeStatus::UNCHANGED
4451                : ChangeStatus ::CHANGED;
4452   }
4453 
4454   ChangeStatus manifest(Attributor &A) override {
4455     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4456 
4457     if (SimplifiedAssociatedValue.hasValue() &&
4458         !SimplifiedAssociatedValue.getValue())
4459       return Changed;
4460 
4461     Value &V = getAssociatedValue();
4462     auto *C = SimplifiedAssociatedValue.hasValue()
4463                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4464                   : UndefValue::get(V.getType());
4465     if (C) {
4466       auto PredForReturned =
4467           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4468             // We can replace the AssociatedValue with the constant.
4469             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4470               return true;
4471 
4472             for (ReturnInst *RI : RetInsts) {
4473               if (RI->getFunction() != getAnchorScope())
4474                 continue;
4475               auto *RC = C;
4476               if (RC->getType() != RI->getReturnValue()->getType())
4477                 RC = ConstantExpr::getBitCast(RC,
4478                                               RI->getReturnValue()->getType());
4479               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4480                                 << " in " << *RI << " :: " << *this << "\n");
4481               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4482                 Changed = ChangeStatus::CHANGED;
4483             }
4484             return true;
4485           };
4486       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4487     }
4488 
4489     return Changed | AAValueSimplify::manifest(A);
4490   }
4491 
4492   /// See AbstractAttribute::trackStatistics()
4493   void trackStatistics() const override {
4494     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4495   }
4496 };
4497 
4498 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4499   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4500       : AAValueSimplifyImpl(IRP, A) {}
4501 
4502   /// See AbstractAttribute::initialize(...).
4503   void initialize(Attributor &A) override {
4504     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4505     //        Needs investigation.
4506     // AAValueSimplifyImpl::initialize(A);
4507     Value &V = getAnchorValue();
4508 
4509     // TODO: add other stuffs
4510     if (isa<Constant>(V))
4511       indicatePessimisticFixpoint();
4512   }
4513 
4514   /// See AbstractAttribute::updateImpl(...).
4515   ChangeStatus updateImpl(Attributor &A) override {
4516     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4517 
4518     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4519                             bool Stripped) -> bool {
4520       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4521       if (!Stripped && this == &AA) {
4522         // TODO: Look the instruction and check recursively.
4523 
4524         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4525                           << "\n");
4526         return false;
4527       }
4528       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4529     };
4530 
4531     bool Dummy = false;
4532     if (!genericValueTraversal<AAValueSimplify, bool>(
4533             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI()))
4534       if (!askSimplifiedValueForAAValueConstantRange(A))
4535         return indicatePessimisticFixpoint();
4536 
4537     // If a candicate was found in this update, return CHANGED.
4538 
4539     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4540                ? ChangeStatus::UNCHANGED
4541                : ChangeStatus ::CHANGED;
4542   }
4543 
4544   /// See AbstractAttribute::trackStatistics()
4545   void trackStatistics() const override {
4546     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4547   }
4548 };
4549 
4550 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4551   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4552       : AAValueSimplifyImpl(IRP, A) {}
4553 
4554   /// See AbstractAttribute::initialize(...).
4555   void initialize(Attributor &A) override {
4556     SimplifiedAssociatedValue = &getAnchorValue();
4557     indicateOptimisticFixpoint();
4558   }
4559   /// See AbstractAttribute::initialize(...).
4560   ChangeStatus updateImpl(Attributor &A) override {
4561     llvm_unreachable(
4562         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4563   }
4564   /// See AbstractAttribute::trackStatistics()
4565   void trackStatistics() const override {
4566     STATS_DECLTRACK_FN_ATTR(value_simplify)
4567   }
4568 };
4569 
4570 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4571   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4572       : AAValueSimplifyFunction(IRP, A) {}
4573   /// See AbstractAttribute::trackStatistics()
4574   void trackStatistics() const override {
4575     STATS_DECLTRACK_CS_ATTR(value_simplify)
4576   }
4577 };
4578 
4579 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4580   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4581       : AAValueSimplifyReturned(IRP, A) {}
4582 
4583   /// See AbstractAttribute::manifest(...).
4584   ChangeStatus manifest(Attributor &A) override {
4585     return AAValueSimplifyImpl::manifest(A);
4586   }
4587 
4588   void trackStatistics() const override {
4589     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4590   }
4591 };
4592 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4593   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4594       : AAValueSimplifyFloating(IRP, A) {}
4595 
4596   void trackStatistics() const override {
4597     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4598   }
4599 };
4600 
4601 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4602 struct AAHeapToStackImpl : public AAHeapToStack {
4603   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4604       : AAHeapToStack(IRP, A) {}
4605 
4606   const std::string getAsStr() const override {
4607     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4608   }
4609 
4610   ChangeStatus manifest(Attributor &A) override {
4611     assert(getState().isValidState() &&
4612            "Attempted to manifest an invalid state!");
4613 
4614     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4615     Function *F = getAnchorScope();
4616     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4617 
4618     for (Instruction *MallocCall : MallocCalls) {
4619       // This malloc cannot be replaced.
4620       if (BadMallocCalls.count(MallocCall))
4621         continue;
4622 
4623       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4624         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4625         A.deleteAfterManifest(*FreeCall);
4626         HasChanged = ChangeStatus::CHANGED;
4627       }
4628 
4629       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4630                         << "\n");
4631 
4632       MaybeAlign Alignment;
4633       Constant *Size;
4634       if (isCallocLikeFn(MallocCall, TLI)) {
4635         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4636         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4637         APInt TotalSize = SizeT->getValue() * Num->getValue();
4638         Size =
4639             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4640       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4641         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4642         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4643                                    ->getValue()
4644                                    .getZExtValue());
4645       } else {
4646         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4647       }
4648 
4649       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4650       Instruction *AI =
4651           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4652                          "", MallocCall->getNextNode());
4653 
4654       if (AI->getType() != MallocCall->getType())
4655         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4656                              AI->getNextNode());
4657 
4658       A.changeValueAfterManifest(*MallocCall, *AI);
4659 
4660       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4661         auto *NBB = II->getNormalDest();
4662         BranchInst::Create(NBB, MallocCall->getParent());
4663         A.deleteAfterManifest(*MallocCall);
4664       } else {
4665         A.deleteAfterManifest(*MallocCall);
4666       }
4667 
4668       // Zero out the allocated memory if it was a calloc.
4669       if (isCallocLikeFn(MallocCall, TLI)) {
4670         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4671                                    AI->getNextNode());
4672         Value *Ops[] = {
4673             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4674             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4675 
4676         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4677         Module *M = F->getParent();
4678         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4679         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4680       }
4681       HasChanged = ChangeStatus::CHANGED;
4682     }
4683 
4684     return HasChanged;
4685   }
4686 
4687   /// Collection of all malloc calls in a function.
4688   SmallSetVector<Instruction *, 4> MallocCalls;
4689 
4690   /// Collection of malloc calls that cannot be converted.
4691   DenseSet<const Instruction *> BadMallocCalls;
4692 
4693   /// A map for each malloc call to the set of associated free calls.
4694   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4695 
4696   ChangeStatus updateImpl(Attributor &A) override;
4697 };
4698 
4699 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4700   const Function *F = getAnchorScope();
4701   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4702 
4703   MustBeExecutedContextExplorer &Explorer =
4704       A.getInfoCache().getMustBeExecutedContextExplorer();
4705 
4706   auto FreeCheck = [&](Instruction &I) {
4707     const auto &Frees = FreesForMalloc.lookup(&I);
4708     if (Frees.size() != 1)
4709       return false;
4710     Instruction *UniqueFree = *Frees.begin();
4711     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4712   };
4713 
4714   auto UsesCheck = [&](Instruction &I) {
4715     bool ValidUsesOnly = true;
4716     bool MustUse = true;
4717     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4718       Instruction *UserI = cast<Instruction>(U.getUser());
4719       if (isa<LoadInst>(UserI))
4720         return true;
4721       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4722         if (SI->getValueOperand() == U.get()) {
4723           LLVM_DEBUG(dbgs()
4724                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4725           ValidUsesOnly = false;
4726         } else {
4727           // A store into the malloc'ed memory is fine.
4728         }
4729         return true;
4730       }
4731       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4732         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4733           return true;
4734         // Record malloc.
4735         if (isFreeCall(UserI, TLI)) {
4736           if (MustUse) {
4737             FreesForMalloc[&I].insert(UserI);
4738           } else {
4739             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4740                               << *UserI << "\n");
4741             ValidUsesOnly = false;
4742           }
4743           return true;
4744         }
4745 
4746         unsigned ArgNo = CB->getArgOperandNo(&U);
4747 
4748         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4749             *this, IRPosition::callsite_argument(*CB, ArgNo));
4750 
4751         // If a callsite argument use is nofree, we are fine.
4752         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4753             *this, IRPosition::callsite_argument(*CB, ArgNo));
4754 
4755         if (!NoCaptureAA.isAssumedNoCapture() ||
4756             !ArgNoFreeAA.isAssumedNoFree()) {
4757           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4758           ValidUsesOnly = false;
4759         }
4760         return true;
4761       }
4762 
4763       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4764           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4765         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4766         Follow = true;
4767         return true;
4768       }
4769       // Unknown user for which we can not track uses further (in a way that
4770       // makes sense).
4771       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4772       ValidUsesOnly = false;
4773       return true;
4774     };
4775     A.checkForAllUses(Pred, *this, I);
4776     return ValidUsesOnly;
4777   };
4778 
4779   auto MallocCallocCheck = [&](Instruction &I) {
4780     if (BadMallocCalls.count(&I))
4781       return true;
4782 
4783     bool IsMalloc = isMallocLikeFn(&I, TLI);
4784     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
4785     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4786     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
4787       BadMallocCalls.insert(&I);
4788       return true;
4789     }
4790 
4791     if (IsMalloc) {
4792       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4793         if (Size->getValue().ule(MaxHeapToStackSize))
4794           if (UsesCheck(I) || FreeCheck(I)) {
4795             MallocCalls.insert(&I);
4796             return true;
4797           }
4798     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
4799       // Only if the alignment and sizes are constant.
4800       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4801         if (Size->getValue().ule(MaxHeapToStackSize))
4802           if (UsesCheck(I) || FreeCheck(I)) {
4803             MallocCalls.insert(&I);
4804             return true;
4805           }
4806     } else if (IsCalloc) {
4807       bool Overflow = false;
4808       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4809         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4810           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4811                   .ule(MaxHeapToStackSize))
4812             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4813               MallocCalls.insert(&I);
4814               return true;
4815             }
4816     }
4817 
4818     BadMallocCalls.insert(&I);
4819     return true;
4820   };
4821 
4822   size_t NumBadMallocs = BadMallocCalls.size();
4823 
4824   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4825 
4826   if (NumBadMallocs != BadMallocCalls.size())
4827     return ChangeStatus::CHANGED;
4828 
4829   return ChangeStatus::UNCHANGED;
4830 }
4831 
4832 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4833   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
4834       : AAHeapToStackImpl(IRP, A) {}
4835 
4836   /// See AbstractAttribute::trackStatistics().
4837   void trackStatistics() const override {
4838     STATS_DECL(
4839         MallocCalls, Function,
4840         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
4841     for (auto *C : MallocCalls)
4842       if (!BadMallocCalls.count(C))
4843         ++BUILD_STAT_NAME(MallocCalls, Function);
4844   }
4845 };
4846 
4847 /// ----------------------- Privatizable Pointers ------------------------------
4848 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
4849   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
4850       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
4851 
4852   ChangeStatus indicatePessimisticFixpoint() override {
4853     AAPrivatizablePtr::indicatePessimisticFixpoint();
4854     PrivatizableType = nullptr;
4855     return ChangeStatus::CHANGED;
4856   }
4857 
4858   /// Identify the type we can chose for a private copy of the underlying
4859   /// argument. None means it is not clear yet, nullptr means there is none.
4860   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
4861 
4862   /// Return a privatizable type that encloses both T0 and T1.
4863   /// TODO: This is merely a stub for now as we should manage a mapping as well.
4864   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
4865     if (!T0.hasValue())
4866       return T1;
4867     if (!T1.hasValue())
4868       return T0;
4869     if (T0 == T1)
4870       return T0;
4871     return nullptr;
4872   }
4873 
4874   Optional<Type *> getPrivatizableType() const override {
4875     return PrivatizableType;
4876   }
4877 
4878   const std::string getAsStr() const override {
4879     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
4880   }
4881 
4882 protected:
4883   Optional<Type *> PrivatizableType;
4884 };
4885 
4886 // TODO: Do this for call site arguments (probably also other values) as well.
4887 
4888 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
4889   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
4890       : AAPrivatizablePtrImpl(IRP, A) {}
4891 
4892   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
4893   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
4894     // If this is a byval argument and we know all the call sites (so we can
4895     // rewrite them), there is no need to check them explicitly.
4896     bool AllCallSitesKnown;
4897     if (getIRPosition().hasAttr(Attribute::ByVal) &&
4898         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
4899                                true, AllCallSitesKnown))
4900       return getAssociatedValue().getType()->getPointerElementType();
4901 
4902     Optional<Type *> Ty;
4903     unsigned ArgNo = getIRPosition().getArgNo();
4904 
4905     // Make sure the associated call site argument has the same type at all call
4906     // sites and it is an allocation we know is safe to privatize, for now that
4907     // means we only allow alloca instructions.
4908     // TODO: We can additionally analyze the accesses in the callee to  create
4909     //       the type from that information instead. That is a little more
4910     //       involved and will be done in a follow up patch.
4911     auto CallSiteCheck = [&](AbstractCallSite ACS) {
4912       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
4913       // Check if a coresponding argument was found or if it is one not
4914       // associated (which can happen for callback calls).
4915       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4916         return false;
4917 
4918       // Check that all call sites agree on a type.
4919       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
4920       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
4921 
4922       LLVM_DEBUG({
4923         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
4924         if (CSTy.hasValue() && CSTy.getValue())
4925           CSTy.getValue()->print(dbgs());
4926         else if (CSTy.hasValue())
4927           dbgs() << "<nullptr>";
4928         else
4929           dbgs() << "<none>";
4930       });
4931 
4932       Ty = combineTypes(Ty, CSTy);
4933 
4934       LLVM_DEBUG({
4935         dbgs() << " : New Type: ";
4936         if (Ty.hasValue() && Ty.getValue())
4937           Ty.getValue()->print(dbgs());
4938         else if (Ty.hasValue())
4939           dbgs() << "<nullptr>";
4940         else
4941           dbgs() << "<none>";
4942         dbgs() << "\n";
4943       });
4944 
4945       return !Ty.hasValue() || Ty.getValue();
4946     };
4947 
4948     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
4949       return nullptr;
4950     return Ty;
4951   }
4952 
4953   /// See AbstractAttribute::updateImpl(...).
4954   ChangeStatus updateImpl(Attributor &A) override {
4955     PrivatizableType = identifyPrivatizableType(A);
4956     if (!PrivatizableType.hasValue())
4957       return ChangeStatus::UNCHANGED;
4958     if (!PrivatizableType.getValue())
4959       return indicatePessimisticFixpoint();
4960 
4961     // Avoid arguments with padding for now.
4962     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
4963         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
4964                                                 A.getInfoCache().getDL())) {
4965       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
4966       return indicatePessimisticFixpoint();
4967     }
4968 
4969     // Verify callee and caller agree on how the promoted argument would be
4970     // passed.
4971     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
4972     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
4973     // which doesn't require the arguments ArgumentPromotion wanted to pass.
4974     Function &Fn = *getIRPosition().getAnchorScope();
4975     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
4976     ArgsToPromote.insert(getAssociatedArgument());
4977     const auto *TTI =
4978         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
4979     if (!TTI ||
4980         !ArgumentPromotionPass::areFunctionArgsABICompatible(
4981             Fn, *TTI, ArgsToPromote, Dummy) ||
4982         ArgsToPromote.empty()) {
4983       LLVM_DEBUG(
4984           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
4985                  << Fn.getName() << "\n");
4986       return indicatePessimisticFixpoint();
4987     }
4988 
4989     // Collect the types that will replace the privatizable type in the function
4990     // signature.
4991     SmallVector<Type *, 16> ReplacementTypes;
4992     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
4993 
4994     // Register a rewrite of the argument.
4995     Argument *Arg = getAssociatedArgument();
4996     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
4997       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
4998       return indicatePessimisticFixpoint();
4999     }
5000 
5001     unsigned ArgNo = Arg->getArgNo();
5002 
5003     // Helper to check if for the given call site the associated argument is
5004     // passed to a callback where the privatization would be different.
5005     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5006       SmallVector<const Use *, 4> CallbackUses;
5007       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5008       for (const Use *U : CallbackUses) {
5009         AbstractCallSite CBACS(U);
5010         assert(CBACS && CBACS.isCallbackCall());
5011         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5012           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5013 
5014           LLVM_DEBUG({
5015             dbgs()
5016                 << "[AAPrivatizablePtr] Argument " << *Arg
5017                 << "check if can be privatized in the context of its parent ("
5018                 << Arg->getParent()->getName()
5019                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5020                    "callback ("
5021                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5022                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5023                 << CBACS.getCallArgOperand(CBArg) << " vs "
5024                 << CB.getArgOperand(ArgNo) << "\n"
5025                 << "[AAPrivatizablePtr] " << CBArg << " : "
5026                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5027           });
5028 
5029           if (CBArgNo != int(ArgNo))
5030             continue;
5031           const auto &CBArgPrivAA =
5032               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5033           if (CBArgPrivAA.isValidState()) {
5034             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5035             if (!CBArgPrivTy.hasValue())
5036               continue;
5037             if (CBArgPrivTy.getValue() == PrivatizableType)
5038               continue;
5039           }
5040 
5041           LLVM_DEBUG({
5042             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5043                    << " cannot be privatized in the context of its parent ("
5044                    << Arg->getParent()->getName()
5045                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5046                       "callback ("
5047                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5048                    << ").\n[AAPrivatizablePtr] for which the argument "
5049                       "privatization is not compatible.\n";
5050           });
5051           return false;
5052         }
5053       }
5054       return true;
5055     };
5056 
5057     // Helper to check if for the given call site the associated argument is
5058     // passed to a direct call where the privatization would be different.
5059     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5060       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5061       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5062       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5063              "Expected a direct call operand for callback call operand");
5064 
5065       LLVM_DEBUG({
5066         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5067                << " check if be privatized in the context of its parent ("
5068                << Arg->getParent()->getName()
5069                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5070                   "direct call of ("
5071                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5072                << ").\n";
5073       });
5074 
5075       Function *DCCallee = DC->getCalledFunction();
5076       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5077         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5078             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5079         if (DCArgPrivAA.isValidState()) {
5080           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5081           if (!DCArgPrivTy.hasValue())
5082             return true;
5083           if (DCArgPrivTy.getValue() == PrivatizableType)
5084             return true;
5085         }
5086       }
5087 
5088       LLVM_DEBUG({
5089         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5090                << " cannot be privatized in the context of its parent ("
5091                << Arg->getParent()->getName()
5092                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5093                   "direct call of ("
5094                << ACS.getInstruction()->getCalledFunction()->getName()
5095                << ").\n[AAPrivatizablePtr] for which the argument "
5096                   "privatization is not compatible.\n";
5097       });
5098       return false;
5099     };
5100 
5101     // Helper to check if the associated argument is used at the given abstract
5102     // call site in a way that is incompatible with the privatization assumed
5103     // here.
5104     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5105       if (ACS.isDirectCall())
5106         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5107       if (ACS.isCallbackCall())
5108         return IsCompatiblePrivArgOfDirectCS(ACS);
5109       return false;
5110     };
5111 
5112     bool AllCallSitesKnown;
5113     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5114                                 AllCallSitesKnown))
5115       return indicatePessimisticFixpoint();
5116 
5117     return ChangeStatus::UNCHANGED;
5118   }
5119 
5120   /// Given a type to private \p PrivType, collect the constituates (which are
5121   /// used) in \p ReplacementTypes.
5122   static void
5123   identifyReplacementTypes(Type *PrivType,
5124                            SmallVectorImpl<Type *> &ReplacementTypes) {
5125     // TODO: For now we expand the privatization type to the fullest which can
5126     //       lead to dead arguments that need to be removed later.
5127     assert(PrivType && "Expected privatizable type!");
5128 
5129     // Traverse the type, extract constituate types on the outermost level.
5130     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5131       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5132         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5133     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5134       ReplacementTypes.append(PrivArrayType->getNumElements(),
5135                               PrivArrayType->getElementType());
5136     } else {
5137       ReplacementTypes.push_back(PrivType);
5138     }
5139   }
5140 
5141   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5142   /// The values needed are taken from the arguments of \p F starting at
5143   /// position \p ArgNo.
5144   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5145                                    unsigned ArgNo, Instruction &IP) {
5146     assert(PrivType && "Expected privatizable type!");
5147 
5148     IRBuilder<NoFolder> IRB(&IP);
5149     const DataLayout &DL = F.getParent()->getDataLayout();
5150 
5151     // Traverse the type, build GEPs and stores.
5152     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5153       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5154       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5155         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5156         Value *Ptr = constructPointer(
5157             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5158         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5159       }
5160     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5161       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5162       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5163       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5164         Value *Ptr =
5165             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5166         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5167       }
5168     } else {
5169       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5170     }
5171   }
5172 
5173   /// Extract values from \p Base according to the type \p PrivType at the
5174   /// call position \p ACS. The values are appended to \p ReplacementValues.
5175   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5176                                Value *Base,
5177                                SmallVectorImpl<Value *> &ReplacementValues) {
5178     assert(Base && "Expected base value!");
5179     assert(PrivType && "Expected privatizable type!");
5180     Instruction *IP = ACS.getInstruction();
5181 
5182     IRBuilder<NoFolder> IRB(IP);
5183     const DataLayout &DL = IP->getModule()->getDataLayout();
5184 
5185     if (Base->getType()->getPointerElementType() != PrivType)
5186       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5187                                                  "", ACS.getInstruction());
5188 
5189     // TODO: Improve the alignment of the loads.
5190     // Traverse the type, build GEPs and loads.
5191     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5192       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5193       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5194         Type *PointeeTy = PrivStructType->getElementType(u);
5195         Value *Ptr =
5196             constructPointer(PointeeTy->getPointerTo(), Base,
5197                              PrivStructLayout->getElementOffset(u), IRB, DL);
5198         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5199         L->setAlignment(Align(1));
5200         ReplacementValues.push_back(L);
5201       }
5202     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5203       Type *PointeeTy = PrivArrayType->getElementType();
5204       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5205       Type *PointeePtrTy = PointeeTy->getPointerTo();
5206       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5207         Value *Ptr =
5208             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5209         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5210         L->setAlignment(Align(1));
5211         ReplacementValues.push_back(L);
5212       }
5213     } else {
5214       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5215       L->setAlignment(Align(1));
5216       ReplacementValues.push_back(L);
5217     }
5218   }
5219 
5220   /// See AbstractAttribute::manifest(...)
5221   ChangeStatus manifest(Attributor &A) override {
5222     if (!PrivatizableType.hasValue())
5223       return ChangeStatus::UNCHANGED;
5224     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5225 
5226     // Collect all tail calls in the function as we cannot allow new allocas to
5227     // escape into tail recursion.
5228     // TODO: Be smarter about new allocas escaping into tail calls.
5229     SmallVector<CallInst *, 16> TailCalls;
5230     if (!A.checkForAllInstructions(
5231             [&](Instruction &I) {
5232               CallInst &CI = cast<CallInst>(I);
5233               if (CI.isTailCall())
5234                 TailCalls.push_back(&CI);
5235               return true;
5236             },
5237             *this, {Instruction::Call}))
5238       return ChangeStatus::UNCHANGED;
5239 
5240     Argument *Arg = getAssociatedArgument();
5241 
5242     // Callback to repair the associated function. A new alloca is placed at the
5243     // beginning and initialized with the values passed through arguments. The
5244     // new alloca replaces the use of the old pointer argument.
5245     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5246         [=](const Attributor::ArgumentReplacementInfo &ARI,
5247             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5248           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5249           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5250           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5251                                     Arg->getName() + ".priv", IP);
5252           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5253                                ArgIt->getArgNo(), *IP);
5254           Arg->replaceAllUsesWith(AI);
5255 
5256           for (CallInst *CI : TailCalls)
5257             CI->setTailCall(false);
5258         };
5259 
5260     // Callback to repair a call site of the associated function. The elements
5261     // of the privatizable type are loaded prior to the call and passed to the
5262     // new function version.
5263     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5264         [=](const Attributor::ArgumentReplacementInfo &ARI,
5265             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
5266           createReplacementValues(
5267               PrivatizableType.getValue(), ACS,
5268               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5269               NewArgOperands);
5270         };
5271 
5272     // Collect the types that will replace the privatizable type in the function
5273     // signature.
5274     SmallVector<Type *, 16> ReplacementTypes;
5275     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5276 
5277     // Register a rewrite of the argument.
5278     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5279                                            std::move(FnRepairCB),
5280                                            std::move(ACSRepairCB)))
5281       return ChangeStatus::CHANGED;
5282     return ChangeStatus::UNCHANGED;
5283   }
5284 
5285   /// See AbstractAttribute::trackStatistics()
5286   void trackStatistics() const override {
5287     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5288   }
5289 };
5290 
5291 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5292   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5293       : AAPrivatizablePtrImpl(IRP, A) {}
5294 
5295   /// See AbstractAttribute::initialize(...).
5296   virtual void initialize(Attributor &A) override {
5297     // TODO: We can privatize more than arguments.
5298     indicatePessimisticFixpoint();
5299   }
5300 
5301   ChangeStatus updateImpl(Attributor &A) override {
5302     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5303                      "updateImpl will not be called");
5304   }
5305 
5306   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5307   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5308     Value *Obj =
5309         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5310     if (!Obj) {
5311       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5312       return nullptr;
5313     }
5314 
5315     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5316       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5317         if (CI->isOne())
5318           return Obj->getType()->getPointerElementType();
5319     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5320       auto &PrivArgAA =
5321           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5322       if (PrivArgAA.isAssumedPrivatizablePtr())
5323         return Obj->getType()->getPointerElementType();
5324     }
5325 
5326     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5327                          "alloca nor privatizable argument: "
5328                       << *Obj << "!\n");
5329     return nullptr;
5330   }
5331 
5332   /// See AbstractAttribute::trackStatistics()
5333   void trackStatistics() const override {
5334     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5335   }
5336 };
5337 
5338 struct AAPrivatizablePtrCallSiteArgument final
5339     : public AAPrivatizablePtrFloating {
5340   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5341       : AAPrivatizablePtrFloating(IRP, A) {}
5342 
5343   /// See AbstractAttribute::initialize(...).
5344   void initialize(Attributor &A) override {
5345     if (getIRPosition().hasAttr(Attribute::ByVal))
5346       indicateOptimisticFixpoint();
5347   }
5348 
5349   /// See AbstractAttribute::updateImpl(...).
5350   ChangeStatus updateImpl(Attributor &A) override {
5351     PrivatizableType = identifyPrivatizableType(A);
5352     if (!PrivatizableType.hasValue())
5353       return ChangeStatus::UNCHANGED;
5354     if (!PrivatizableType.getValue())
5355       return indicatePessimisticFixpoint();
5356 
5357     const IRPosition &IRP = getIRPosition();
5358     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5359     if (!NoCaptureAA.isAssumedNoCapture()) {
5360       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5361       return indicatePessimisticFixpoint();
5362     }
5363 
5364     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5365     if (!NoAliasAA.isAssumedNoAlias()) {
5366       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5367       return indicatePessimisticFixpoint();
5368     }
5369 
5370     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5371     if (!MemBehaviorAA.isAssumedReadOnly()) {
5372       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5373       return indicatePessimisticFixpoint();
5374     }
5375 
5376     return ChangeStatus::UNCHANGED;
5377   }
5378 
5379   /// See AbstractAttribute::trackStatistics()
5380   void trackStatistics() const override {
5381     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5382   }
5383 };
5384 
5385 struct AAPrivatizablePtrCallSiteReturned final
5386     : public AAPrivatizablePtrFloating {
5387   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5388       : AAPrivatizablePtrFloating(IRP, A) {}
5389 
5390   /// See AbstractAttribute::initialize(...).
5391   void initialize(Attributor &A) override {
5392     // TODO: We can privatize more than arguments.
5393     indicatePessimisticFixpoint();
5394   }
5395 
5396   /// See AbstractAttribute::trackStatistics()
5397   void trackStatistics() const override {
5398     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5399   }
5400 };
5401 
5402 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5403   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5404       : AAPrivatizablePtrFloating(IRP, A) {}
5405 
5406   /// See AbstractAttribute::initialize(...).
5407   void initialize(Attributor &A) override {
5408     // TODO: We can privatize more than arguments.
5409     indicatePessimisticFixpoint();
5410   }
5411 
5412   /// See AbstractAttribute::trackStatistics()
5413   void trackStatistics() const override {
5414     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5415   }
5416 };
5417 
5418 /// -------------------- Memory Behavior Attributes ----------------------------
5419 /// Includes read-none, read-only, and write-only.
5420 /// ----------------------------------------------------------------------------
5421 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5422   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5423       : AAMemoryBehavior(IRP, A) {}
5424 
5425   /// See AbstractAttribute::initialize(...).
5426   void initialize(Attributor &A) override {
5427     intersectAssumedBits(BEST_STATE);
5428     getKnownStateFromValue(getIRPosition(), getState());
5429     IRAttribute::initialize(A);
5430   }
5431 
5432   /// Return the memory behavior information encoded in the IR for \p IRP.
5433   static void getKnownStateFromValue(const IRPosition &IRP,
5434                                      BitIntegerState &State,
5435                                      bool IgnoreSubsumingPositions = false) {
5436     SmallVector<Attribute, 2> Attrs;
5437     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5438     for (const Attribute &Attr : Attrs) {
5439       switch (Attr.getKindAsEnum()) {
5440       case Attribute::ReadNone:
5441         State.addKnownBits(NO_ACCESSES);
5442         break;
5443       case Attribute::ReadOnly:
5444         State.addKnownBits(NO_WRITES);
5445         break;
5446       case Attribute::WriteOnly:
5447         State.addKnownBits(NO_READS);
5448         break;
5449       default:
5450         llvm_unreachable("Unexpected attribute!");
5451       }
5452     }
5453 
5454     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5455       if (!I->mayReadFromMemory())
5456         State.addKnownBits(NO_READS);
5457       if (!I->mayWriteToMemory())
5458         State.addKnownBits(NO_WRITES);
5459     }
5460   }
5461 
5462   /// See AbstractAttribute::getDeducedAttributes(...).
5463   void getDeducedAttributes(LLVMContext &Ctx,
5464                             SmallVectorImpl<Attribute> &Attrs) const override {
5465     assert(Attrs.size() == 0);
5466     if (isAssumedReadNone())
5467       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5468     else if (isAssumedReadOnly())
5469       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5470     else if (isAssumedWriteOnly())
5471       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5472     assert(Attrs.size() <= 1);
5473   }
5474 
5475   /// See AbstractAttribute::manifest(...).
5476   ChangeStatus manifest(Attributor &A) override {
5477     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5478       return ChangeStatus::UNCHANGED;
5479 
5480     const IRPosition &IRP = getIRPosition();
5481 
5482     // Check if we would improve the existing attributes first.
5483     SmallVector<Attribute, 4> DeducedAttrs;
5484     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5485     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5486           return IRP.hasAttr(Attr.getKindAsEnum(),
5487                              /* IgnoreSubsumingPositions */ true);
5488         }))
5489       return ChangeStatus::UNCHANGED;
5490 
5491     // Clear existing attributes.
5492     IRP.removeAttrs(AttrKinds);
5493 
5494     // Use the generic manifest method.
5495     return IRAttribute::manifest(A);
5496   }
5497 
5498   /// See AbstractState::getAsStr().
5499   const std::string getAsStr() const override {
5500     if (isAssumedReadNone())
5501       return "readnone";
5502     if (isAssumedReadOnly())
5503       return "readonly";
5504     if (isAssumedWriteOnly())
5505       return "writeonly";
5506     return "may-read/write";
5507   }
5508 
5509   /// The set of IR attributes AAMemoryBehavior deals with.
5510   static const Attribute::AttrKind AttrKinds[3];
5511 };
5512 
5513 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5514     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5515 
5516 /// Memory behavior attribute for a floating value.
5517 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5518   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5519       : AAMemoryBehaviorImpl(IRP, A) {}
5520 
5521   /// See AbstractAttribute::initialize(...).
5522   void initialize(Attributor &A) override {
5523     AAMemoryBehaviorImpl::initialize(A);
5524     // Initialize the use vector with all direct uses of the associated value.
5525     for (const Use &U : getAssociatedValue().uses())
5526       Uses.insert(&U);
5527   }
5528 
5529   /// See AbstractAttribute::updateImpl(...).
5530   ChangeStatus updateImpl(Attributor &A) override;
5531 
5532   /// See AbstractAttribute::trackStatistics()
5533   void trackStatistics() const override {
5534     if (isAssumedReadNone())
5535       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5536     else if (isAssumedReadOnly())
5537       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5538     else if (isAssumedWriteOnly())
5539       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5540   }
5541 
5542 private:
5543   /// Return true if users of \p UserI might access the underlying
5544   /// variable/location described by \p U and should therefore be analyzed.
5545   bool followUsersOfUseIn(Attributor &A, const Use *U,
5546                           const Instruction *UserI);
5547 
5548   /// Update the state according to the effect of use \p U in \p UserI.
5549   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5550 
5551 protected:
5552   /// Container for (transitive) uses of the associated argument.
5553   SetVector<const Use *> Uses;
5554 };
5555 
5556 /// Memory behavior attribute for function argument.
5557 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5558   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5559       : AAMemoryBehaviorFloating(IRP, A) {}
5560 
5561   /// See AbstractAttribute::initialize(...).
5562   void initialize(Attributor &A) override {
5563     intersectAssumedBits(BEST_STATE);
5564     const IRPosition &IRP = getIRPosition();
5565     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5566     // can query it when we use has/getAttr. That would allow us to reuse the
5567     // initialize of the base class here.
5568     bool HasByVal =
5569         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5570     getKnownStateFromValue(IRP, getState(),
5571                            /* IgnoreSubsumingPositions */ HasByVal);
5572 
5573     // Initialize the use vector with all direct uses of the associated value.
5574     Argument *Arg = getAssociatedArgument();
5575     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5576       indicatePessimisticFixpoint();
5577     } else {
5578       // Initialize the use vector with all direct uses of the associated value.
5579       for (const Use &U : Arg->uses())
5580         Uses.insert(&U);
5581     }
5582   }
5583 
5584   ChangeStatus manifest(Attributor &A) override {
5585     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5586     if (!getAssociatedValue().getType()->isPointerTy())
5587       return ChangeStatus::UNCHANGED;
5588 
5589     // TODO: From readattrs.ll: "inalloca parameters are always
5590     //                           considered written"
5591     if (hasAttr({Attribute::InAlloca})) {
5592       removeKnownBits(NO_WRITES);
5593       removeAssumedBits(NO_WRITES);
5594     }
5595     return AAMemoryBehaviorFloating::manifest(A);
5596   }
5597 
5598   /// See AbstractAttribute::trackStatistics()
5599   void trackStatistics() const override {
5600     if (isAssumedReadNone())
5601       STATS_DECLTRACK_ARG_ATTR(readnone)
5602     else if (isAssumedReadOnly())
5603       STATS_DECLTRACK_ARG_ATTR(readonly)
5604     else if (isAssumedWriteOnly())
5605       STATS_DECLTRACK_ARG_ATTR(writeonly)
5606   }
5607 };
5608 
5609 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5610   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5611       : AAMemoryBehaviorArgument(IRP, A) {}
5612 
5613   /// See AbstractAttribute::initialize(...).
5614   void initialize(Attributor &A) override {
5615     if (Argument *Arg = getAssociatedArgument()) {
5616       if (Arg->hasByValAttr()) {
5617         addKnownBits(NO_WRITES);
5618         removeKnownBits(NO_READS);
5619         removeAssumedBits(NO_READS);
5620       }
5621     }
5622     AAMemoryBehaviorArgument::initialize(A);
5623   }
5624 
5625   /// See AbstractAttribute::updateImpl(...).
5626   ChangeStatus updateImpl(Attributor &A) override {
5627     // TODO: Once we have call site specific value information we can provide
5628     //       call site specific liveness liveness information and then it makes
5629     //       sense to specialize attributes for call sites arguments instead of
5630     //       redirecting requests to the callee argument.
5631     Argument *Arg = getAssociatedArgument();
5632     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5633     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5634     return clampStateAndIndicateChange(
5635         getState(),
5636         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5637   }
5638 
5639   /// See AbstractAttribute::trackStatistics()
5640   void trackStatistics() const override {
5641     if (isAssumedReadNone())
5642       STATS_DECLTRACK_CSARG_ATTR(readnone)
5643     else if (isAssumedReadOnly())
5644       STATS_DECLTRACK_CSARG_ATTR(readonly)
5645     else if (isAssumedWriteOnly())
5646       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5647   }
5648 };
5649 
5650 /// Memory behavior attribute for a call site return position.
5651 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5652   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
5653       : AAMemoryBehaviorFloating(IRP, A) {}
5654 
5655   /// See AbstractAttribute::manifest(...).
5656   ChangeStatus manifest(Attributor &A) override {
5657     // We do not annotate returned values.
5658     return ChangeStatus::UNCHANGED;
5659   }
5660 
5661   /// See AbstractAttribute::trackStatistics()
5662   void trackStatistics() const override {}
5663 };
5664 
5665 /// An AA to represent the memory behavior function attributes.
5666 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5667   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
5668       : AAMemoryBehaviorImpl(IRP, A) {}
5669 
5670   /// See AbstractAttribute::updateImpl(Attributor &A).
5671   virtual ChangeStatus updateImpl(Attributor &A) override;
5672 
5673   /// See AbstractAttribute::manifest(...).
5674   ChangeStatus manifest(Attributor &A) override {
5675     Function &F = cast<Function>(getAnchorValue());
5676     if (isAssumedReadNone()) {
5677       F.removeFnAttr(Attribute::ArgMemOnly);
5678       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5679       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5680     }
5681     return AAMemoryBehaviorImpl::manifest(A);
5682   }
5683 
5684   /// See AbstractAttribute::trackStatistics()
5685   void trackStatistics() const override {
5686     if (isAssumedReadNone())
5687       STATS_DECLTRACK_FN_ATTR(readnone)
5688     else if (isAssumedReadOnly())
5689       STATS_DECLTRACK_FN_ATTR(readonly)
5690     else if (isAssumedWriteOnly())
5691       STATS_DECLTRACK_FN_ATTR(writeonly)
5692   }
5693 };
5694 
5695 /// AAMemoryBehavior attribute for call sites.
5696 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5697   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
5698       : AAMemoryBehaviorImpl(IRP, A) {}
5699 
5700   /// See AbstractAttribute::initialize(...).
5701   void initialize(Attributor &A) override {
5702     AAMemoryBehaviorImpl::initialize(A);
5703     Function *F = getAssociatedFunction();
5704     if (!F || !A.isFunctionIPOAmendable(*F)) {
5705       indicatePessimisticFixpoint();
5706       return;
5707     }
5708   }
5709 
5710   /// See AbstractAttribute::updateImpl(...).
5711   ChangeStatus updateImpl(Attributor &A) override {
5712     // TODO: Once we have call site specific value information we can provide
5713     //       call site specific liveness liveness information and then it makes
5714     //       sense to specialize attributes for call sites arguments instead of
5715     //       redirecting requests to the callee argument.
5716     Function *F = getAssociatedFunction();
5717     const IRPosition &FnPos = IRPosition::function(*F);
5718     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5719     return clampStateAndIndicateChange(
5720         getState(),
5721         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5722   }
5723 
5724   /// See AbstractAttribute::trackStatistics()
5725   void trackStatistics() const override {
5726     if (isAssumedReadNone())
5727       STATS_DECLTRACK_CS_ATTR(readnone)
5728     else if (isAssumedReadOnly())
5729       STATS_DECLTRACK_CS_ATTR(readonly)
5730     else if (isAssumedWriteOnly())
5731       STATS_DECLTRACK_CS_ATTR(writeonly)
5732   }
5733 };
5734 
5735 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5736 
5737   // The current assumed state used to determine a change.
5738   auto AssumedState = getAssumed();
5739 
5740   auto CheckRWInst = [&](Instruction &I) {
5741     // If the instruction has an own memory behavior state, use it to restrict
5742     // the local state. No further analysis is required as the other memory
5743     // state is as optimistic as it gets.
5744     if (const auto *CB = dyn_cast<CallBase>(&I)) {
5745       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5746           *this, IRPosition::callsite_function(*CB));
5747       intersectAssumedBits(MemBehaviorAA.getAssumed());
5748       return !isAtFixpoint();
5749     }
5750 
5751     // Remove access kind modifiers if necessary.
5752     if (I.mayReadFromMemory())
5753       removeAssumedBits(NO_READS);
5754     if (I.mayWriteToMemory())
5755       removeAssumedBits(NO_WRITES);
5756     return !isAtFixpoint();
5757   };
5758 
5759   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5760     return indicatePessimisticFixpoint();
5761 
5762   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5763                                         : ChangeStatus::UNCHANGED;
5764 }
5765 
5766 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5767 
5768   const IRPosition &IRP = getIRPosition();
5769   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5770   AAMemoryBehavior::StateType &S = getState();
5771 
5772   // First, check the function scope. We take the known information and we avoid
5773   // work if the assumed information implies the current assumed information for
5774   // this attribute. This is a valid for all but byval arguments.
5775   Argument *Arg = IRP.getAssociatedArgument();
5776   AAMemoryBehavior::base_t FnMemAssumedState =
5777       AAMemoryBehavior::StateType::getWorstState();
5778   if (!Arg || !Arg->hasByValAttr()) {
5779     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5780         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5781     FnMemAssumedState = FnMemAA.getAssumed();
5782     S.addKnownBits(FnMemAA.getKnown());
5783     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5784       return ChangeStatus::UNCHANGED;
5785   }
5786 
5787   // Make sure the value is not captured (except through "return"), if
5788   // it is, any information derived would be irrelevant anyway as we cannot
5789   // check the potential aliases introduced by the capture. However, no need
5790   // to fall back to anythign less optimistic than the function state.
5791   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5792       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5793   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5794     S.intersectAssumedBits(FnMemAssumedState);
5795     return ChangeStatus::CHANGED;
5796   }
5797 
5798   // The current assumed state used to determine a change.
5799   auto AssumedState = S.getAssumed();
5800 
5801   // Liveness information to exclude dead users.
5802   // TODO: Take the FnPos once we have call site specific liveness information.
5803   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5804       *this, IRPosition::function(*IRP.getAssociatedFunction()),
5805       /* TrackDependence */ false);
5806 
5807   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5808   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5809     const Use *U = Uses[i];
5810     Instruction *UserI = cast<Instruction>(U->getUser());
5811     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5812                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
5813                       << "]\n");
5814     if (A.isAssumedDead(*U, this, &LivenessAA))
5815       continue;
5816 
5817     // Droppable users, e.g., llvm::assume does not actually perform any action.
5818     if (UserI->isDroppable())
5819       continue;
5820 
5821     // Check if the users of UserI should also be visited.
5822     if (followUsersOfUseIn(A, U, UserI))
5823       for (const Use &UserIUse : UserI->uses())
5824         Uses.insert(&UserIUse);
5825 
5826     // If UserI might touch memory we analyze the use in detail.
5827     if (UserI->mayReadOrWriteMemory())
5828       analyzeUseIn(A, U, UserI);
5829   }
5830 
5831   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5832                                         : ChangeStatus::UNCHANGED;
5833 }
5834 
5835 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5836                                                   const Instruction *UserI) {
5837   // The loaded value is unrelated to the pointer argument, no need to
5838   // follow the users of the load.
5839   if (isa<LoadInst>(UserI))
5840     return false;
5841 
5842   // By default we follow all uses assuming UserI might leak information on U,
5843   // we have special handling for call sites operands though.
5844   const auto *CB = dyn_cast<CallBase>(UserI);
5845   if (!CB || !CB->isArgOperand(U))
5846     return true;
5847 
5848   // If the use is a call argument known not to be captured, the users of
5849   // the call do not need to be visited because they have to be unrelated to
5850   // the input. Note that this check is not trivial even though we disallow
5851   // general capturing of the underlying argument. The reason is that the
5852   // call might the argument "through return", which we allow and for which we
5853   // need to check call users.
5854   if (U->get()->getType()->isPointerTy()) {
5855     unsigned ArgNo = CB->getArgOperandNo(U);
5856     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5857         *this, IRPosition::callsite_argument(*CB, ArgNo),
5858         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5859     return !ArgNoCaptureAA.isAssumedNoCapture();
5860   }
5861 
5862   return true;
5863 }
5864 
5865 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5866                                             const Instruction *UserI) {
5867   assert(UserI->mayReadOrWriteMemory());
5868 
5869   switch (UserI->getOpcode()) {
5870   default:
5871     // TODO: Handle all atomics and other side-effect operations we know of.
5872     break;
5873   case Instruction::Load:
5874     // Loads cause the NO_READS property to disappear.
5875     removeAssumedBits(NO_READS);
5876     return;
5877 
5878   case Instruction::Store:
5879     // Stores cause the NO_WRITES property to disappear if the use is the
5880     // pointer operand. Note that we do assume that capturing was taken care of
5881     // somewhere else.
5882     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5883       removeAssumedBits(NO_WRITES);
5884     return;
5885 
5886   case Instruction::Call:
5887   case Instruction::CallBr:
5888   case Instruction::Invoke: {
5889     // For call sites we look at the argument memory behavior attribute (this
5890     // could be recursive!) in order to restrict our own state.
5891     const auto *CB = cast<CallBase>(UserI);
5892 
5893     // Give up on operand bundles.
5894     if (CB->isBundleOperand(U)) {
5895       indicatePessimisticFixpoint();
5896       return;
5897     }
5898 
5899     // Calling a function does read the function pointer, maybe write it if the
5900     // function is self-modifying.
5901     if (CB->isCallee(U)) {
5902       removeAssumedBits(NO_READS);
5903       break;
5904     }
5905 
5906     // Adjust the possible access behavior based on the information on the
5907     // argument.
5908     IRPosition Pos;
5909     if (U->get()->getType()->isPointerTy())
5910       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
5911     else
5912       Pos = IRPosition::callsite_function(*CB);
5913     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5914         *this, Pos,
5915         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5916     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5917     // and at least "known".
5918     intersectAssumedBits(MemBehaviorAA.getAssumed());
5919     return;
5920   }
5921   };
5922 
5923   // Generally, look at the "may-properties" and adjust the assumed state if we
5924   // did not trigger special handling before.
5925   if (UserI->mayReadFromMemory())
5926     removeAssumedBits(NO_READS);
5927   if (UserI->mayWriteToMemory())
5928     removeAssumedBits(NO_WRITES);
5929 }
5930 
5931 } // namespace
5932 
5933 /// -------------------- Memory Locations Attributes ---------------------------
5934 /// Includes read-none, argmemonly, inaccessiblememonly,
5935 /// inaccessiblememorargmemonly
5936 /// ----------------------------------------------------------------------------
5937 
5938 std::string AAMemoryLocation::getMemoryLocationsAsStr(
5939     AAMemoryLocation::MemoryLocationsKind MLK) {
5940   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
5941     return "all memory";
5942   if (MLK == AAMemoryLocation::NO_LOCATIONS)
5943     return "no memory";
5944   std::string S = "memory:";
5945   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
5946     S += "stack,";
5947   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
5948     S += "constant,";
5949   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
5950     S += "internal global,";
5951   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
5952     S += "external global,";
5953   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
5954     S += "argument,";
5955   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
5956     S += "inaccessible,";
5957   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
5958     S += "malloced,";
5959   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
5960     S += "unknown,";
5961   S.pop_back();
5962   return S;
5963 }
5964 
5965 namespace {
5966 struct AAMemoryLocationImpl : public AAMemoryLocation {
5967 
5968   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
5969       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
5970     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
5971       AccessKind2Accesses[u] = nullptr;
5972   }
5973 
5974   ~AAMemoryLocationImpl() {
5975     // The AccessSets are allocated via a BumpPtrAllocator, we call
5976     // the destructor manually.
5977     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
5978       if (AccessKind2Accesses[u])
5979         AccessKind2Accesses[u]->~AccessSet();
5980   }
5981 
5982   /// See AbstractAttribute::initialize(...).
5983   void initialize(Attributor &A) override {
5984     intersectAssumedBits(BEST_STATE);
5985     getKnownStateFromValue(getIRPosition(), getState());
5986     IRAttribute::initialize(A);
5987   }
5988 
5989   /// Return the memory behavior information encoded in the IR for \p IRP.
5990   static void getKnownStateFromValue(const IRPosition &IRP,
5991                                      BitIntegerState &State,
5992                                      bool IgnoreSubsumingPositions = false) {
5993     SmallVector<Attribute, 2> Attrs;
5994     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5995     for (const Attribute &Attr : Attrs) {
5996       switch (Attr.getKindAsEnum()) {
5997       case Attribute::ReadNone:
5998         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
5999         break;
6000       case Attribute::InaccessibleMemOnly:
6001         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6002         break;
6003       case Attribute::ArgMemOnly:
6004         State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6005         break;
6006       case Attribute::InaccessibleMemOrArgMemOnly:
6007         State.addKnownBits(
6008             inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6009         break;
6010       default:
6011         llvm_unreachable("Unexpected attribute!");
6012       }
6013     }
6014   }
6015 
6016   /// See AbstractAttribute::getDeducedAttributes(...).
6017   void getDeducedAttributes(LLVMContext &Ctx,
6018                             SmallVectorImpl<Attribute> &Attrs) const override {
6019     assert(Attrs.size() == 0);
6020     if (isAssumedReadNone()) {
6021       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6022     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6023       if (isAssumedInaccessibleMemOnly())
6024         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6025       else if (isAssumedArgMemOnly())
6026         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6027       else if (isAssumedInaccessibleOrArgMemOnly())
6028         Attrs.push_back(
6029             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6030     }
6031     assert(Attrs.size() <= 1);
6032   }
6033 
6034   /// See AbstractAttribute::manifest(...).
6035   ChangeStatus manifest(Attributor &A) override {
6036     const IRPosition &IRP = getIRPosition();
6037 
6038     // Check if we would improve the existing attributes first.
6039     SmallVector<Attribute, 4> DeducedAttrs;
6040     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6041     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6042           return IRP.hasAttr(Attr.getKindAsEnum(),
6043                              /* IgnoreSubsumingPositions */ true);
6044         }))
6045       return ChangeStatus::UNCHANGED;
6046 
6047     // Clear existing attributes.
6048     IRP.removeAttrs(AttrKinds);
6049     if (isAssumedReadNone())
6050       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6051 
6052     // Use the generic manifest method.
6053     return IRAttribute::manifest(A);
6054   }
6055 
6056   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6057   bool checkForAllAccessesToMemoryKind(
6058       function_ref<bool(const Instruction *, const Value *, AccessKind,
6059                         MemoryLocationsKind)>
6060           Pred,
6061       MemoryLocationsKind RequestedMLK) const override {
6062     if (!isValidState())
6063       return false;
6064 
6065     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6066     if (AssumedMLK == NO_LOCATIONS)
6067       return true;
6068 
6069     unsigned Idx = 0;
6070     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6071          CurMLK *= 2, ++Idx) {
6072       if (CurMLK & RequestedMLK)
6073         continue;
6074 
6075       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6076         for (const AccessInfo &AI : *Accesses)
6077           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6078             return false;
6079     }
6080 
6081     return true;
6082   }
6083 
6084   ChangeStatus indicatePessimisticFixpoint() override {
6085     // If we give up and indicate a pessimistic fixpoint this instruction will
6086     // become an access for all potential access kinds:
6087     // TODO: Add pointers for argmemonly and globals to improve the results of
6088     //       checkForAllAccessesToMemoryKind.
6089     bool Changed = false;
6090     MemoryLocationsKind KnownMLK = getKnown();
6091     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6092     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6093       if (!(CurMLK & KnownMLK))
6094         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6095                                   getAccessKindFromInst(I));
6096     return AAMemoryLocation::indicatePessimisticFixpoint();
6097   }
6098 
6099 protected:
6100   /// Helper struct to tie together an instruction that has a read or write
6101   /// effect with the pointer it accesses (if any).
6102   struct AccessInfo {
6103 
6104     /// The instruction that caused the access.
6105     const Instruction *I;
6106 
6107     /// The base pointer that is accessed, or null if unknown.
6108     const Value *Ptr;
6109 
6110     /// The kind of access (read/write/read+write).
6111     AccessKind Kind;
6112 
6113     bool operator==(const AccessInfo &RHS) const {
6114       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6115     }
6116     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6117       if (LHS.I != RHS.I)
6118         return LHS.I < RHS.I;
6119       if (LHS.Ptr != RHS.Ptr)
6120         return LHS.Ptr < RHS.Ptr;
6121       if (LHS.Kind != RHS.Kind)
6122         return LHS.Kind < RHS.Kind;
6123       return false;
6124     }
6125   };
6126 
6127   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6128   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6129   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6130   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6131 
6132   /// Return the kind(s) of location that may be accessed by \p V.
6133   AAMemoryLocation::MemoryLocationsKind
6134   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6135 
6136   /// Return the access kind as determined by \p I.
6137   AccessKind getAccessKindFromInst(const Instruction *I) {
6138     AccessKind AK = READ_WRITE;
6139     if (I) {
6140       AK = I->mayReadFromMemory() ? READ : NONE;
6141       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6142     }
6143     return AK;
6144   }
6145 
6146   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6147   /// an access of kind \p AK to a \p MLK memory location with the access
6148   /// pointer \p Ptr.
6149   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6150                                  MemoryLocationsKind MLK, const Instruction *I,
6151                                  const Value *Ptr, bool &Changed,
6152                                  AccessKind AK = READ_WRITE) {
6153 
6154     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6155     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6156     if (!Accesses)
6157       Accesses = new (Allocator) AccessSet();
6158     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6159     State.removeAssumedBits(MLK);
6160   }
6161 
6162   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6163   /// arguments, and update the state and access map accordingly.
6164   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6165                           AAMemoryLocation::StateType &State, bool &Changed);
6166 
6167   /// Used to allocate access sets.
6168   BumpPtrAllocator &Allocator;
6169 
6170   /// The set of IR attributes AAMemoryLocation deals with.
6171   static const Attribute::AttrKind AttrKinds[4];
6172 };
6173 
6174 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6175     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6176     Attribute::InaccessibleMemOrArgMemOnly};
6177 
6178 void AAMemoryLocationImpl::categorizePtrValue(
6179     Attributor &A, const Instruction &I, const Value &Ptr,
6180     AAMemoryLocation::StateType &State, bool &Changed) {
6181   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6182                     << Ptr << " ["
6183                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6184 
6185   auto StripGEPCB = [](Value *V) -> Value * {
6186     auto *GEP = dyn_cast<GEPOperator>(V);
6187     while (GEP) {
6188       V = GEP->getPointerOperand();
6189       GEP = dyn_cast<GEPOperator>(V);
6190     }
6191     return V;
6192   };
6193 
6194   auto VisitValueCB = [&](Value &V, const Instruction *,
6195                           AAMemoryLocation::StateType &T,
6196                           bool Stripped) -> bool {
6197     MemoryLocationsKind MLK = NO_LOCATIONS;
6198     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6199     if (isa<UndefValue>(V))
6200       return true;
6201     if (auto *Arg = dyn_cast<Argument>(&V)) {
6202       if (Arg->hasByValAttr())
6203         MLK = NO_LOCAL_MEM;
6204       else
6205         MLK = NO_ARGUMENT_MEM;
6206     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6207       if (GV->hasLocalLinkage())
6208         MLK = NO_GLOBAL_INTERNAL_MEM;
6209       else
6210         MLK = NO_GLOBAL_EXTERNAL_MEM;
6211     } else if (isa<AllocaInst>(V))
6212       MLK = NO_LOCAL_MEM;
6213     else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6214       const auto &NoAliasAA =
6215           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6216       if (NoAliasAA.isAssumedNoAlias())
6217         MLK = NO_MALLOCED_MEM;
6218       else
6219         MLK = NO_UNKOWN_MEM;
6220     } else {
6221       MLK = NO_UNKOWN_MEM;
6222     }
6223 
6224     assert(MLK != NO_LOCATIONS && "No location specified!");
6225     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6226                               getAccessKindFromInst(&I));
6227     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6228                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6229                       << "\n");
6230     return true;
6231   };
6232 
6233   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6234           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6235           /* MaxValues */ 32, StripGEPCB)) {
6236     LLVM_DEBUG(
6237         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6238     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6239                               getAccessKindFromInst(&I));
6240   } else {
6241     LLVM_DEBUG(
6242         dbgs()
6243         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6244         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6245   }
6246 }
6247 
6248 AAMemoryLocation::MemoryLocationsKind
6249 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6250                                                   bool &Changed) {
6251   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6252                     << I << "\n");
6253 
6254   AAMemoryLocation::StateType AccessedLocs;
6255   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6256 
6257   if (auto *CB = dyn_cast<CallBase>(&I)) {
6258 
6259     // First check if we assume any memory is access is visible.
6260     const auto &CBMemLocationAA =
6261         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6262     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6263                       << " [" << CBMemLocationAA << "]\n");
6264 
6265     if (CBMemLocationAA.isAssumedReadNone())
6266       return NO_LOCATIONS;
6267 
6268     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6269       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6270                                 Changed, getAccessKindFromInst(&I));
6271       return AccessedLocs.getAssumed();
6272     }
6273 
6274     uint32_t CBAssumedNotAccessedLocs =
6275         CBMemLocationAA.getAssumedNotAccessedLocation();
6276 
6277     // Set the argmemonly and global bit as we handle them separately below.
6278     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6279         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6280 
6281     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6282       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6283         continue;
6284       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6285                                 getAccessKindFromInst(&I));
6286     }
6287 
6288     // Now handle global memory if it might be accessed. This is slightly tricky
6289     // as NO_GLOBAL_MEM has multiple bits set.
6290     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6291     if (HasGlobalAccesses) {
6292       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6293                             AccessKind Kind, MemoryLocationsKind MLK) {
6294         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6295                                   getAccessKindFromInst(&I));
6296         return true;
6297       };
6298       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6299               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6300         return AccessedLocs.getWorstState();
6301     }
6302 
6303     LLVM_DEBUG(
6304         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6305                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6306 
6307     // Now handle argument memory if it might be accessed.
6308     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6309     if (HasArgAccesses) {
6310       for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E;
6311            ++ArgNo) {
6312 
6313         // Skip non-pointer arguments.
6314         const Value *ArgOp = CB->getArgOperand(ArgNo);
6315         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6316           continue;
6317 
6318         // Skip readnone arguments.
6319         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
6320         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6321             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6322 
6323         if (ArgOpMemLocationAA.isAssumedReadNone())
6324           continue;
6325 
6326         // Categorize potentially accessed pointer arguments as if there was an
6327         // access instruction with them as pointer.
6328         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6329       }
6330     }
6331 
6332     LLVM_DEBUG(
6333         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6334                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6335 
6336     return AccessedLocs.getAssumed();
6337   }
6338 
6339   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6340     LLVM_DEBUG(
6341         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6342                << I << " [" << *Ptr << "]\n");
6343     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6344     return AccessedLocs.getAssumed();
6345   }
6346 
6347   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6348                     << I << "\n");
6349   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6350                             getAccessKindFromInst(&I));
6351   return AccessedLocs.getAssumed();
6352 }
6353 
6354 /// An AA to represent the memory behavior function attributes.
6355 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6356   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6357       : AAMemoryLocationImpl(IRP, A) {}
6358 
6359   /// See AbstractAttribute::updateImpl(Attributor &A).
6360   virtual ChangeStatus updateImpl(Attributor &A) override {
6361 
6362     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6363         *this, getIRPosition(), /* TrackDependence */ false);
6364     if (MemBehaviorAA.isAssumedReadNone()) {
6365       if (MemBehaviorAA.isKnownReadNone())
6366         return indicateOptimisticFixpoint();
6367       assert(isAssumedReadNone() &&
6368              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6369       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6370       return ChangeStatus::UNCHANGED;
6371     }
6372 
6373     // The current assumed state used to determine a change.
6374     auto AssumedState = getAssumed();
6375     bool Changed = false;
6376 
6377     auto CheckRWInst = [&](Instruction &I) {
6378       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6379       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6380                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6381       removeAssumedBits(inverseLocation(MLK, false, false));
6382       return true;
6383     };
6384 
6385     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6386       return indicatePessimisticFixpoint();
6387 
6388     Changed |= AssumedState != getAssumed();
6389     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6390   }
6391 
6392   /// See AbstractAttribute::trackStatistics()
6393   void trackStatistics() const override {
6394     if (isAssumedReadNone())
6395       STATS_DECLTRACK_FN_ATTR(readnone)
6396     else if (isAssumedArgMemOnly())
6397       STATS_DECLTRACK_FN_ATTR(argmemonly)
6398     else if (isAssumedInaccessibleMemOnly())
6399       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6400     else if (isAssumedInaccessibleOrArgMemOnly())
6401       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6402   }
6403 };
6404 
6405 /// AAMemoryLocation attribute for call sites.
6406 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6407   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6408       : AAMemoryLocationImpl(IRP, A) {}
6409 
6410   /// See AbstractAttribute::initialize(...).
6411   void initialize(Attributor &A) override {
6412     AAMemoryLocationImpl::initialize(A);
6413     Function *F = getAssociatedFunction();
6414     if (!F || !A.isFunctionIPOAmendable(*F)) {
6415       indicatePessimisticFixpoint();
6416       return;
6417     }
6418   }
6419 
6420   /// See AbstractAttribute::updateImpl(...).
6421   ChangeStatus updateImpl(Attributor &A) override {
6422     // TODO: Once we have call site specific value information we can provide
6423     //       call site specific liveness liveness information and then it makes
6424     //       sense to specialize attributes for call sites arguments instead of
6425     //       redirecting requests to the callee argument.
6426     Function *F = getAssociatedFunction();
6427     const IRPosition &FnPos = IRPosition::function(*F);
6428     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6429     bool Changed = false;
6430     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6431                           AccessKind Kind, MemoryLocationsKind MLK) {
6432       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6433                                 getAccessKindFromInst(I));
6434       return true;
6435     };
6436     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6437       return indicatePessimisticFixpoint();
6438     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6439   }
6440 
6441   /// See AbstractAttribute::trackStatistics()
6442   void trackStatistics() const override {
6443     if (isAssumedReadNone())
6444       STATS_DECLTRACK_CS_ATTR(readnone)
6445   }
6446 };
6447 
6448 /// ------------------ Value Constant Range Attribute -------------------------
6449 
6450 struct AAValueConstantRangeImpl : AAValueConstantRange {
6451   using StateType = IntegerRangeState;
6452   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6453       : AAValueConstantRange(IRP, A) {}
6454 
6455   /// See AbstractAttribute::getAsStr().
6456   const std::string getAsStr() const override {
6457     std::string Str;
6458     llvm::raw_string_ostream OS(Str);
6459     OS << "range(" << getBitWidth() << ")<";
6460     getKnown().print(OS);
6461     OS << " / ";
6462     getAssumed().print(OS);
6463     OS << ">";
6464     return OS.str();
6465   }
6466 
6467   /// Helper function to get a SCEV expr for the associated value at program
6468   /// point \p I.
6469   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6470     if (!getAnchorScope())
6471       return nullptr;
6472 
6473     ScalarEvolution *SE =
6474         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6475             *getAnchorScope());
6476 
6477     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6478         *getAnchorScope());
6479 
6480     if (!SE || !LI)
6481       return nullptr;
6482 
6483     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6484     if (!I)
6485       return S;
6486 
6487     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6488   }
6489 
6490   /// Helper function to get a range from SCEV for the associated value at
6491   /// program point \p I.
6492   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6493                                          const Instruction *I = nullptr) const {
6494     if (!getAnchorScope())
6495       return getWorstState(getBitWidth());
6496 
6497     ScalarEvolution *SE =
6498         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6499             *getAnchorScope());
6500 
6501     const SCEV *S = getSCEV(A, I);
6502     if (!SE || !S)
6503       return getWorstState(getBitWidth());
6504 
6505     return SE->getUnsignedRange(S);
6506   }
6507 
6508   /// Helper function to get a range from LVI for the associated value at
6509   /// program point \p I.
6510   ConstantRange
6511   getConstantRangeFromLVI(Attributor &A,
6512                           const Instruction *CtxI = nullptr) const {
6513     if (!getAnchorScope())
6514       return getWorstState(getBitWidth());
6515 
6516     LazyValueInfo *LVI =
6517         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6518             *getAnchorScope());
6519 
6520     if (!LVI || !CtxI)
6521       return getWorstState(getBitWidth());
6522     return LVI->getConstantRange(&getAssociatedValue(),
6523                                  const_cast<BasicBlock *>(CtxI->getParent()),
6524                                  const_cast<Instruction *>(CtxI));
6525   }
6526 
6527   /// See AAValueConstantRange::getKnownConstantRange(..).
6528   ConstantRange
6529   getKnownConstantRange(Attributor &A,
6530                         const Instruction *CtxI = nullptr) const override {
6531     if (!CtxI || CtxI == getCtxI())
6532       return getKnown();
6533 
6534     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6535     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6536     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6537   }
6538 
6539   /// See AAValueConstantRange::getAssumedConstantRange(..).
6540   ConstantRange
6541   getAssumedConstantRange(Attributor &A,
6542                           const Instruction *CtxI = nullptr) const override {
6543     // TODO: Make SCEV use Attributor assumption.
6544     //       We may be able to bound a variable range via assumptions in
6545     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6546     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6547 
6548     if (!CtxI || CtxI == getCtxI())
6549       return getAssumed();
6550 
6551     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6552     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6553     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6554   }
6555 
6556   /// See AbstractAttribute::initialize(..).
6557   void initialize(Attributor &A) override {
6558     // Intersect a range given by SCEV.
6559     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6560 
6561     // Intersect a range given by LVI.
6562     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6563   }
6564 
6565   /// Helper function to create MDNode for range metadata.
6566   static MDNode *
6567   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6568                             const ConstantRange &AssumedConstantRange) {
6569     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6570                                   Ty, AssumedConstantRange.getLower())),
6571                               ConstantAsMetadata::get(ConstantInt::get(
6572                                   Ty, AssumedConstantRange.getUpper()))};
6573     return MDNode::get(Ctx, LowAndHigh);
6574   }
6575 
6576   /// Return true if \p Assumed is included in \p KnownRanges.
6577   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6578 
6579     if (Assumed.isFullSet())
6580       return false;
6581 
6582     if (!KnownRanges)
6583       return true;
6584 
6585     // If multiple ranges are annotated in IR, we give up to annotate assumed
6586     // range for now.
6587 
6588     // TODO:  If there exists a known range which containts assumed range, we
6589     // can say assumed range is better.
6590     if (KnownRanges->getNumOperands() > 2)
6591       return false;
6592 
6593     ConstantInt *Lower =
6594         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6595     ConstantInt *Upper =
6596         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6597 
6598     ConstantRange Known(Lower->getValue(), Upper->getValue());
6599     return Known.contains(Assumed) && Known != Assumed;
6600   }
6601 
6602   /// Helper function to set range metadata.
6603   static bool
6604   setRangeMetadataIfisBetterRange(Instruction *I,
6605                                   const ConstantRange &AssumedConstantRange) {
6606     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6607     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6608       if (!AssumedConstantRange.isEmptySet()) {
6609         I->setMetadata(LLVMContext::MD_range,
6610                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6611                                                  AssumedConstantRange));
6612         return true;
6613       }
6614     }
6615     return false;
6616   }
6617 
6618   /// See AbstractAttribute::manifest()
6619   ChangeStatus manifest(Attributor &A) override {
6620     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6621     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6622     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6623 
6624     auto &V = getAssociatedValue();
6625     if (!AssumedConstantRange.isEmptySet() &&
6626         !AssumedConstantRange.isSingleElement()) {
6627       if (Instruction *I = dyn_cast<Instruction>(&V))
6628         if (isa<CallInst>(I) || isa<LoadInst>(I))
6629           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6630             Changed = ChangeStatus::CHANGED;
6631     }
6632 
6633     return Changed;
6634   }
6635 };
6636 
6637 struct AAValueConstantRangeArgument final
6638     : AAArgumentFromCallSiteArguments<
6639           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6640   using Base = AAArgumentFromCallSiteArguments<
6641       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
6642   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
6643       : Base(IRP, A) {}
6644 
6645   /// See AbstractAttribute::initialize(..).
6646   void initialize(Attributor &A) override {
6647     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
6648       indicatePessimisticFixpoint();
6649     } else {
6650       Base::initialize(A);
6651     }
6652   }
6653 
6654   /// See AbstractAttribute::trackStatistics()
6655   void trackStatistics() const override {
6656     STATS_DECLTRACK_ARG_ATTR(value_range)
6657   }
6658 };
6659 
6660 struct AAValueConstantRangeReturned
6661     : AAReturnedFromReturnedValues<AAValueConstantRange,
6662                                    AAValueConstantRangeImpl> {
6663   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6664                                             AAValueConstantRangeImpl>;
6665   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
6666       : Base(IRP, A) {}
6667 
6668   /// See AbstractAttribute::initialize(...).
6669   void initialize(Attributor &A) override {}
6670 
6671   /// See AbstractAttribute::trackStatistics()
6672   void trackStatistics() const override {
6673     STATS_DECLTRACK_FNRET_ATTR(value_range)
6674   }
6675 };
6676 
6677 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6678   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
6679       : AAValueConstantRangeImpl(IRP, A) {}
6680 
6681   /// See AbstractAttribute::initialize(...).
6682   void initialize(Attributor &A) override {
6683     AAValueConstantRangeImpl::initialize(A);
6684     Value &V = getAssociatedValue();
6685 
6686     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6687       unionAssumed(ConstantRange(C->getValue()));
6688       indicateOptimisticFixpoint();
6689       return;
6690     }
6691 
6692     if (isa<UndefValue>(&V)) {
6693       // Collapse the undef state to 0.
6694       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6695       indicateOptimisticFixpoint();
6696       return;
6697     }
6698 
6699     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6700       return;
6701     // If it is a load instruction with range metadata, use it.
6702     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6703       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6704         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6705         return;
6706       }
6707 
6708     // We can work with PHI and select instruction as we traverse their operands
6709     // during update.
6710     if (isa<SelectInst>(V) || isa<PHINode>(V))
6711       return;
6712 
6713     // Otherwise we give up.
6714     indicatePessimisticFixpoint();
6715 
6716     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6717                       << getAssociatedValue() << "\n");
6718   }
6719 
6720   bool calculateBinaryOperator(
6721       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6722       const Instruction *CtxI,
6723       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6724     Value *LHS = BinOp->getOperand(0);
6725     Value *RHS = BinOp->getOperand(1);
6726     // TODO: Allow non integers as well.
6727     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6728       return false;
6729 
6730     auto &LHSAA =
6731         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6732     QuerriedAAs.push_back(&LHSAA);
6733     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6734 
6735     auto &RHSAA =
6736         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6737     QuerriedAAs.push_back(&RHSAA);
6738     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6739 
6740     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
6741 
6742     T.unionAssumed(AssumedRange);
6743 
6744     // TODO: Track a known state too.
6745 
6746     return T.isValidState();
6747   }
6748 
6749   bool calculateCastInst(
6750       Attributor &A, CastInst *CastI, IntegerRangeState &T,
6751       const Instruction *CtxI,
6752       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6753     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
6754     // TODO: Allow non integers as well.
6755     Value &OpV = *CastI->getOperand(0);
6756     if (!OpV.getType()->isIntegerTy())
6757       return false;
6758 
6759     auto &OpAA =
6760         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
6761     QuerriedAAs.push_back(&OpAA);
6762     T.unionAssumed(
6763         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
6764     return T.isValidState();
6765   }
6766 
6767   bool
6768   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
6769                    const Instruction *CtxI,
6770                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6771     Value *LHS = CmpI->getOperand(0);
6772     Value *RHS = CmpI->getOperand(1);
6773     // TODO: Allow non integers as well.
6774     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6775       return false;
6776 
6777     auto &LHSAA =
6778         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6779     QuerriedAAs.push_back(&LHSAA);
6780     auto &RHSAA =
6781         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6782     QuerriedAAs.push_back(&RHSAA);
6783 
6784     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6785     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6786 
6787     // If one of them is empty set, we can't decide.
6788     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
6789       return true;
6790 
6791     bool MustTrue = false, MustFalse = false;
6792 
6793     auto AllowedRegion =
6794         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
6795 
6796     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
6797         CmpI->getPredicate(), RHSAARange);
6798 
6799     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
6800       MustFalse = true;
6801 
6802     if (SatisfyingRegion.contains(LHSAARange))
6803       MustTrue = true;
6804 
6805     assert((!MustTrue || !MustFalse) &&
6806            "Either MustTrue or MustFalse should be false!");
6807 
6808     if (MustTrue)
6809       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
6810     else if (MustFalse)
6811       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
6812     else
6813       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
6814 
6815     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
6816                       << " " << RHSAA << "\n");
6817 
6818     // TODO: Track a known state too.
6819     return T.isValidState();
6820   }
6821 
6822   /// See AbstractAttribute::updateImpl(...).
6823   ChangeStatus updateImpl(Attributor &A) override {
6824     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
6825                             IntegerRangeState &T, bool Stripped) -> bool {
6826       Instruction *I = dyn_cast<Instruction>(&V);
6827       if (!I || isa<CallBase>(I)) {
6828 
6829         // If the value is not instruction, we query AA to Attributor.
6830         const auto &AA =
6831             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
6832 
6833         // Clamp operator is not used to utilize a program point CtxI.
6834         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
6835 
6836         return T.isValidState();
6837       }
6838 
6839       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
6840       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
6841         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
6842           return false;
6843       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
6844         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
6845           return false;
6846       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
6847         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
6848           return false;
6849       } else {
6850         // Give up with other instructions.
6851         // TODO: Add other instructions
6852 
6853         T.indicatePessimisticFixpoint();
6854         return false;
6855       }
6856 
6857       // Catch circular reasoning in a pessimistic way for now.
6858       // TODO: Check how the range evolves and if we stripped anything, see also
6859       //       AADereferenceable or AAAlign for similar situations.
6860       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
6861         if (QueriedAA != this)
6862           continue;
6863         // If we are in a stady state we do not need to worry.
6864         if (T.getAssumed() == getState().getAssumed())
6865           continue;
6866         T.indicatePessimisticFixpoint();
6867       }
6868 
6869       return T.isValidState();
6870     };
6871 
6872     IntegerRangeState T(getBitWidth());
6873 
6874     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
6875             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
6876       return indicatePessimisticFixpoint();
6877 
6878     return clampStateAndIndicateChange(getState(), T);
6879   }
6880 
6881   /// See AbstractAttribute::trackStatistics()
6882   void trackStatistics() const override {
6883     STATS_DECLTRACK_FLOATING_ATTR(value_range)
6884   }
6885 };
6886 
6887 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
6888   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
6889       : AAValueConstantRangeImpl(IRP, A) {}
6890 
6891   /// See AbstractAttribute::initialize(...).
6892   ChangeStatus updateImpl(Attributor &A) override {
6893     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
6894                      "not be called");
6895   }
6896 
6897   /// See AbstractAttribute::trackStatistics()
6898   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
6899 };
6900 
6901 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
6902   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
6903       : AAValueConstantRangeFunction(IRP, A) {}
6904 
6905   /// See AbstractAttribute::trackStatistics()
6906   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
6907 };
6908 
6909 struct AAValueConstantRangeCallSiteReturned
6910     : AACallSiteReturnedFromReturned<AAValueConstantRange,
6911                                      AAValueConstantRangeImpl> {
6912   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
6913       : AACallSiteReturnedFromReturned<AAValueConstantRange,
6914                                        AAValueConstantRangeImpl>(IRP, A) {}
6915 
6916   /// See AbstractAttribute::initialize(...).
6917   void initialize(Attributor &A) override {
6918     // If it is a load instruction with range metadata, use the metadata.
6919     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
6920       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
6921         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6922 
6923     AAValueConstantRangeImpl::initialize(A);
6924   }
6925 
6926   /// See AbstractAttribute::trackStatistics()
6927   void trackStatistics() const override {
6928     STATS_DECLTRACK_CSRET_ATTR(value_range)
6929   }
6930 };
6931 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
6932   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
6933       : AAValueConstantRangeFloating(IRP, A) {}
6934 
6935   /// See AbstractAttribute::trackStatistics()
6936   void trackStatistics() const override {
6937     STATS_DECLTRACK_CSARG_ATTR(value_range)
6938   }
6939 };
6940 } // namespace
6941 
6942 const char AAReturnedValues::ID = 0;
6943 const char AANoUnwind::ID = 0;
6944 const char AANoSync::ID = 0;
6945 const char AANoFree::ID = 0;
6946 const char AANonNull::ID = 0;
6947 const char AANoRecurse::ID = 0;
6948 const char AAWillReturn::ID = 0;
6949 const char AAUndefinedBehavior::ID = 0;
6950 const char AANoAlias::ID = 0;
6951 const char AAReachability::ID = 0;
6952 const char AANoReturn::ID = 0;
6953 const char AAIsDead::ID = 0;
6954 const char AADereferenceable::ID = 0;
6955 const char AAAlign::ID = 0;
6956 const char AANoCapture::ID = 0;
6957 const char AAValueSimplify::ID = 0;
6958 const char AAHeapToStack::ID = 0;
6959 const char AAPrivatizablePtr::ID = 0;
6960 const char AAMemoryBehavior::ID = 0;
6961 const char AAMemoryLocation::ID = 0;
6962 const char AAValueConstantRange::ID = 0;
6963 
6964 // Macro magic to create the static generator function for attributes that
6965 // follow the naming scheme.
6966 
6967 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
6968   case IRPosition::PK:                                                         \
6969     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
6970 
6971 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
6972   case IRPosition::PK:                                                         \
6973     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
6974     ++NumAAs;                                                                  \
6975     break;
6976 
6977 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
6978   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6979     CLASS *AA = nullptr;                                                       \
6980     switch (IRP.getPositionKind()) {                                           \
6981       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6982       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
6983       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
6984       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
6985       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
6986       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
6987       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
6988       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
6989     }                                                                          \
6990     return *AA;                                                                \
6991   }
6992 
6993 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
6994   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
6995     CLASS *AA = nullptr;                                                       \
6996     switch (IRP.getPositionKind()) {                                           \
6997       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
6998       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
6999       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7000       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7001       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7002       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7003       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7004       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7005     }                                                                          \
7006     return *AA;                                                                \
7007   }
7008 
7009 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
7010   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7011     CLASS *AA = nullptr;                                                       \
7012     switch (IRP.getPositionKind()) {                                           \
7013       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7014       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7015       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7016       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7017       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7018       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7019       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7020       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7021     }                                                                          \
7022     return *AA;                                                                \
7023   }
7024 
7025 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
7026   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7027     CLASS *AA = nullptr;                                                       \
7028     switch (IRP.getPositionKind()) {                                           \
7029       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7030       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7031       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7032       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7033       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7034       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7035       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7036       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7037     }                                                                          \
7038     return *AA;                                                                \
7039   }
7040 
7041 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
7042   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7043     CLASS *AA = nullptr;                                                       \
7044     switch (IRP.getPositionKind()) {                                           \
7045       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7046       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7047       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7048       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7049       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7050       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7051       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7052       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7053     }                                                                          \
7054     return *AA;                                                                \
7055   }
7056 
7057 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
7058 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
7059 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
7060 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
7061 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
7062 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
7063 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
7064 
7065 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
7066 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7067 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
7068 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7069 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7070 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7071 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7072 
7073 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7074 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7075 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7076 
7077 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7078 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7079 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7080 
7081 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7082 
7083 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7084 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7085 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7086 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7087 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7088 #undef SWITCH_PK_CREATE
7089 #undef SWITCH_PK_INV
7090