1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SmallPtrSet.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/AssumeBundleQueries.h"
19 #include "llvm/Analysis/CaptureTracking.h"
20 #include "llvm/Analysis/LazyValueInfo.h"
21 #include "llvm/Analysis/MemoryBuiltins.h"
22 #include "llvm/Analysis/ScalarEvolution.h"
23 #include "llvm/Analysis/ValueTracking.h"
24 #include "llvm/IR/IRBuilder.h"
25 #include "llvm/IR/IntrinsicInst.h"
26 #include "llvm/IR/NoFolder.h"
27 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
28 #include "llvm/Transforms/Utils/Local.h"
29 
30 #include <cassert>
31 
32 using namespace llvm;
33 
34 #define DEBUG_TYPE "attributor"
35 
36 static cl::opt<bool> ManifestInternal(
37     "attributor-manifest-internal", cl::Hidden,
38     cl::desc("Manifest Attributor internal string attributes."),
39     cl::init(false));
40 
41 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
42                                        cl::Hidden);
43 
44 STATISTIC(NumAAs, "Number of abstract attributes created");
45 
46 // Some helper macros to deal with statistics tracking.
47 //
48 // Usage:
49 // For simple IR attribute tracking overload trackStatistics in the abstract
50 // attribute and choose the right STATS_DECLTRACK_********* macro,
51 // e.g.,:
52 //  void trackStatistics() const override {
53 //    STATS_DECLTRACK_ARG_ATTR(returned)
54 //  }
55 // If there is a single "increment" side one can use the macro
56 // STATS_DECLTRACK with a custom message. If there are multiple increment
57 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
58 //
59 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
60   ("Number of " #TYPE " marked '" #NAME "'")
61 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
62 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
63 #define STATS_DECL(NAME, TYPE, MSG)                                            \
64   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
65 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
66 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
67   {                                                                            \
68     STATS_DECL(NAME, TYPE, MSG)                                                \
69     STATS_TRACK(NAME, TYPE)                                                    \
70   }
71 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
72   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
73 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
74   STATS_DECLTRACK(NAME, CSArguments,                                           \
75                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
76 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
77   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
78 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
79   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
80 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
81   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
82                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
83 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
84   STATS_DECLTRACK(NAME, CSReturn,                                              \
85                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
86 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
87   STATS_DECLTRACK(NAME, Floating,                                              \
88                   ("Number of floating values known to be '" #NAME "'"))
89 
90 // Specialization of the operator<< for abstract attributes subclasses. This
91 // disambiguates situations where multiple operators are applicable.
92 namespace llvm {
93 #define PIPE_OPERATOR(CLASS)                                                   \
94   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
95     return OS << static_cast<const AbstractAttribute &>(AA);                   \
96   }
97 
98 PIPE_OPERATOR(AAIsDead)
99 PIPE_OPERATOR(AANoUnwind)
100 PIPE_OPERATOR(AANoSync)
101 PIPE_OPERATOR(AANoRecurse)
102 PIPE_OPERATOR(AAWillReturn)
103 PIPE_OPERATOR(AANoReturn)
104 PIPE_OPERATOR(AAReturnedValues)
105 PIPE_OPERATOR(AANonNull)
106 PIPE_OPERATOR(AANoAlias)
107 PIPE_OPERATOR(AADereferenceable)
108 PIPE_OPERATOR(AAAlign)
109 PIPE_OPERATOR(AANoCapture)
110 PIPE_OPERATOR(AAValueSimplify)
111 PIPE_OPERATOR(AANoFree)
112 PIPE_OPERATOR(AAHeapToStack)
113 PIPE_OPERATOR(AAReachability)
114 PIPE_OPERATOR(AAMemoryBehavior)
115 PIPE_OPERATOR(AAMemoryLocation)
116 PIPE_OPERATOR(AAValueConstantRange)
117 PIPE_OPERATOR(AAPrivatizablePtr)
118 PIPE_OPERATOR(AAUndefinedBehavior)
119 
120 #undef PIPE_OPERATOR
121 } // namespace llvm
122 
123 namespace {
124 
125 static Optional<ConstantInt *>
126 getAssumedConstantInt(Attributor &A, const Value &V,
127                       const AbstractAttribute &AA,
128                       bool &UsedAssumedInformation) {
129   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
130   if (C.hasValue())
131     return dyn_cast_or_null<ConstantInt>(C.getValue());
132   return llvm::None;
133 }
134 
135 /// Get pointer operand of memory accessing instruction. If \p I is
136 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
137 /// is set to false and the instruction is volatile, return nullptr.
138 static const Value *getPointerOperand(const Instruction *I,
139                                       bool AllowVolatile) {
140   if (auto *LI = dyn_cast<LoadInst>(I)) {
141     if (!AllowVolatile && LI->isVolatile())
142       return nullptr;
143     return LI->getPointerOperand();
144   }
145 
146   if (auto *SI = dyn_cast<StoreInst>(I)) {
147     if (!AllowVolatile && SI->isVolatile())
148       return nullptr;
149     return SI->getPointerOperand();
150   }
151 
152   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
153     if (!AllowVolatile && CXI->isVolatile())
154       return nullptr;
155     return CXI->getPointerOperand();
156   }
157 
158   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
159     if (!AllowVolatile && RMWI->isVolatile())
160       return nullptr;
161     return RMWI->getPointerOperand();
162   }
163 
164   return nullptr;
165 }
166 
167 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
168 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
169 /// getelement pointer instructions that traverse the natural type of \p Ptr if
170 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
171 /// through a cast to i8*.
172 ///
173 /// TODO: This could probably live somewhere more prominantly if it doesn't
174 ///       already exist.
175 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
176                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
177   assert(Offset >= 0 && "Negative offset not supported yet!");
178   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
179                     << "-bytes as " << *ResTy << "\n");
180 
181   // The initial type we are trying to traverse to get nice GEPs.
182   Type *Ty = Ptr->getType();
183 
184   SmallVector<Value *, 4> Indices;
185   std::string GEPName = Ptr->getName().str();
186   while (Offset) {
187     uint64_t Idx, Rem;
188 
189     if (auto *STy = dyn_cast<StructType>(Ty)) {
190       const StructLayout *SL = DL.getStructLayout(STy);
191       if (int64_t(SL->getSizeInBytes()) < Offset)
192         break;
193       Idx = SL->getElementContainingOffset(Offset);
194       assert(Idx < STy->getNumElements() && "Offset calculation error!");
195       Rem = Offset - SL->getElementOffset(Idx);
196       Ty = STy->getElementType(Idx);
197     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
198       Ty = PTy->getElementType();
199       if (!Ty->isSized())
200         break;
201       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
202       assert(ElementSize && "Expected type with size!");
203       Idx = Offset / ElementSize;
204       Rem = Offset % ElementSize;
205     } else {
206       // Non-aggregate type, we cast and make byte-wise progress now.
207       break;
208     }
209 
210     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
211                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
212 
213     GEPName += "." + std::to_string(Idx);
214     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
215     Offset = Rem;
216   }
217 
218   // Create a GEP if we collected indices above.
219   if (Indices.size())
220     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
221 
222   // If an offset is left we use byte-wise adjustment.
223   if (Offset) {
224     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
225     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
226                         GEPName + ".b" + Twine(Offset));
227   }
228 
229   // Ensure the result has the requested type.
230   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
231 
232   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
233   return Ptr;
234 }
235 
236 /// Recursively visit all values that might become \p IRP at some point. This
237 /// will be done by looking through cast instructions, selects, phis, and calls
238 /// with the "returned" attribute. Once we cannot look through the value any
239 /// further, the callback \p VisitValueCB is invoked and passed the current
240 /// value, the \p State, and a flag to indicate if we stripped anything.
241 /// Stripped means that we unpacked the value associated with \p IRP at least
242 /// once. Note that the value used for the callback may still be the value
243 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
244 /// we will never visit more values than specified by \p MaxValues.
245 template <typename AAType, typename StateTy>
246 static bool genericValueTraversal(
247     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
248     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
249         VisitValueCB,
250     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
251     function_ref<Value *(Value *)> StripCB = nullptr) {
252 
253   const AAIsDead *LivenessAA = nullptr;
254   if (IRP.getAnchorScope())
255     LivenessAA = &A.getAAFor<AAIsDead>(
256         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
257         /* TrackDependence */ false);
258   bool AnyDead = false;
259 
260   using Item = std::pair<Value *, const Instruction *>;
261   SmallSet<Item, 16> Visited;
262   SmallVector<Item, 16> Worklist;
263   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
264 
265   int Iteration = 0;
266   do {
267     Item I = Worklist.pop_back_val();
268     Value *V = I.first;
269     CtxI = I.second;
270     if (StripCB)
271       V = StripCB(V);
272 
273     // Check if we should process the current value. To prevent endless
274     // recursion keep a record of the values we followed!
275     if (!Visited.insert(I).second)
276       continue;
277 
278     // Make sure we limit the compile time for complex expressions.
279     if (Iteration++ >= MaxValues)
280       return false;
281 
282     // Explicitly look through calls with a "returned" attribute if we do
283     // not have a pointer as stripPointerCasts only works on them.
284     Value *NewV = nullptr;
285     if (V->getType()->isPointerTy()) {
286       NewV = V->stripPointerCasts();
287     } else {
288       auto *CB = dyn_cast<CallBase>(V);
289       if (CB && CB->getCalledFunction()) {
290         for (Argument &Arg : CB->getCalledFunction()->args())
291           if (Arg.hasReturnedAttr()) {
292             NewV = CB->getArgOperand(Arg.getArgNo());
293             break;
294           }
295       }
296     }
297     if (NewV && NewV != V) {
298       Worklist.push_back({NewV, CtxI});
299       continue;
300     }
301 
302     // Look through select instructions, visit both potential values.
303     if (auto *SI = dyn_cast<SelectInst>(V)) {
304       Worklist.push_back({SI->getTrueValue(), CtxI});
305       Worklist.push_back({SI->getFalseValue(), CtxI});
306       continue;
307     }
308 
309     // Look through phi nodes, visit all live operands.
310     if (auto *PHI = dyn_cast<PHINode>(V)) {
311       assert(LivenessAA &&
312              "Expected liveness in the presence of instructions!");
313       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
314         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
315         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
316                             LivenessAA,
317                             /* CheckBBLivenessOnly */ true)) {
318           AnyDead = true;
319           continue;
320         }
321         Worklist.push_back(
322             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
323       }
324       continue;
325     }
326 
327     if (UseValueSimplify && !isa<Constant>(V)) {
328       bool UsedAssumedInformation = false;
329       Optional<Constant *> C =
330           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
331       if (!C.hasValue())
332         continue;
333       if (Value *NewV = C.getValue()) {
334         Worklist.push_back({NewV, CtxI});
335         continue;
336       }
337     }
338 
339     // Once a leaf is reached we inform the user through the callback.
340     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
341       return false;
342   } while (!Worklist.empty());
343 
344   // If we actually used liveness information so we have to record a dependence.
345   if (AnyDead)
346     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
347 
348   // All values have been visited.
349   return true;
350 }
351 
352 static const Value *
353 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
354                                      const DataLayout &DL,
355                                      bool AllowNonInbounds = false) {
356   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
357   if (!Ptr)
358     return nullptr;
359 
360   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
361                                           AllowNonInbounds);
362 }
363 
364 /// Helper function to clamp a state \p S of type \p StateType with the
365 /// information in \p R and indicate/return if \p S did change (as-in update is
366 /// required to be run again).
367 template <typename StateType>
368 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
369   auto Assumed = S.getAssumed();
370   S ^= R;
371   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
372                                    : ChangeStatus::CHANGED;
373 }
374 
375 /// Clamp the information known for all returned values of a function
376 /// (identified by \p QueryingAA) into \p S.
377 template <typename AAType, typename StateType = typename AAType::StateType>
378 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
379                                      StateType &S) {
380   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
381                     << QueryingAA << " into " << S << "\n");
382 
383   assert((QueryingAA.getIRPosition().getPositionKind() ==
384               IRPosition::IRP_RETURNED ||
385           QueryingAA.getIRPosition().getPositionKind() ==
386               IRPosition::IRP_CALL_SITE_RETURNED) &&
387          "Can only clamp returned value states for a function returned or call "
388          "site returned position!");
389 
390   // Use an optional state as there might not be any return values and we want
391   // to join (IntegerState::operator&) the state of all there are.
392   Optional<StateType> T;
393 
394   // Callback for each possibly returned value.
395   auto CheckReturnValue = [&](Value &RV) -> bool {
396     const IRPosition &RVPos = IRPosition::value(RV);
397     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
398     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
399                       << " @ " << RVPos << "\n");
400     const StateType &AAS = static_cast<const StateType &>(AA.getState());
401     if (T.hasValue())
402       *T &= AAS;
403     else
404       T = AAS;
405     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
406                       << "\n");
407     return T->isValidState();
408   };
409 
410   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
411     S.indicatePessimisticFixpoint();
412   else if (T.hasValue())
413     S ^= *T;
414 }
415 
416 /// Helper class for generic deduction: return value -> returned position.
417 template <typename AAType, typename BaseType,
418           typename StateType = typename BaseType::StateType>
419 struct AAReturnedFromReturnedValues : public BaseType {
420   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
421       : BaseType(IRP, A) {}
422 
423   /// See AbstractAttribute::updateImpl(...).
424   ChangeStatus updateImpl(Attributor &A) override {
425     StateType S(StateType::getBestState(this->getState()));
426     clampReturnedValueStates<AAType, StateType>(A, *this, S);
427     // TODO: If we know we visited all returned values, thus no are assumed
428     // dead, we can take the known information from the state T.
429     return clampStateAndIndicateChange<StateType>(this->getState(), S);
430   }
431 };
432 
433 /// Clamp the information known at all call sites for a given argument
434 /// (identified by \p QueryingAA) into \p S.
435 template <typename AAType, typename StateType = typename AAType::StateType>
436 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
437                                         StateType &S) {
438   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
439                     << QueryingAA << " into " << S << "\n");
440 
441   assert(QueryingAA.getIRPosition().getPositionKind() ==
442              IRPosition::IRP_ARGUMENT &&
443          "Can only clamp call site argument states for an argument position!");
444 
445   // Use an optional state as there might not be any return values and we want
446   // to join (IntegerState::operator&) the state of all there are.
447   Optional<StateType> T;
448 
449   // The argument number which is also the call site argument number.
450   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
451 
452   auto CallSiteCheck = [&](AbstractCallSite ACS) {
453     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
454     // Check if a coresponding argument was found or if it is on not associated
455     // (which can happen for callback calls).
456     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
457       return false;
458 
459     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
460     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
461                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
462     const StateType &AAS = static_cast<const StateType &>(AA.getState());
463     if (T.hasValue())
464       *T &= AAS;
465     else
466       T = AAS;
467     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
468                       << "\n");
469     return T->isValidState();
470   };
471 
472   bool AllCallSitesKnown;
473   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
474                               AllCallSitesKnown))
475     S.indicatePessimisticFixpoint();
476   else if (T.hasValue())
477     S ^= *T;
478 }
479 
480 /// Helper class for generic deduction: call site argument -> argument position.
481 template <typename AAType, typename BaseType,
482           typename StateType = typename AAType::StateType>
483 struct AAArgumentFromCallSiteArguments : public BaseType {
484   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
485       : BaseType(IRP, A) {}
486 
487   /// See AbstractAttribute::updateImpl(...).
488   ChangeStatus updateImpl(Attributor &A) override {
489     StateType S(StateType::getBestState(this->getState()));
490     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
491     // TODO: If we know we visited all incoming values, thus no are assumed
492     // dead, we can take the known information from the state T.
493     return clampStateAndIndicateChange<StateType>(this->getState(), S);
494   }
495 };
496 
497 /// Helper class for generic replication: function returned -> cs returned.
498 template <typename AAType, typename BaseType,
499           typename StateType = typename BaseType::StateType>
500 struct AACallSiteReturnedFromReturned : public BaseType {
501   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
502       : BaseType(IRP, A) {}
503 
504   /// See AbstractAttribute::updateImpl(...).
505   ChangeStatus updateImpl(Attributor &A) override {
506     assert(this->getIRPosition().getPositionKind() ==
507                IRPosition::IRP_CALL_SITE_RETURNED &&
508            "Can only wrap function returned positions for call site returned "
509            "positions!");
510     auto &S = this->getState();
511 
512     const Function *AssociatedFunction =
513         this->getIRPosition().getAssociatedFunction();
514     if (!AssociatedFunction)
515       return S.indicatePessimisticFixpoint();
516 
517     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
518     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
519     return clampStateAndIndicateChange(
520         S, static_cast<const StateType &>(AA.getState()));
521   }
522 };
523 
524 /// Helper function to accumulate uses.
525 template <class AAType, typename StateType = typename AAType::StateType>
526 static void followUsesInContext(AAType &AA, Attributor &A,
527                                 MustBeExecutedContextExplorer &Explorer,
528                                 const Instruction *CtxI,
529                                 SetVector<const Use *> &Uses,
530                                 StateType &State) {
531   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
532   for (unsigned u = 0; u < Uses.size(); ++u) {
533     const Use *U = Uses[u];
534     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
535       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
536       if (Found && AA.followUseInMBEC(A, U, UserI, State))
537         for (const Use &Us : UserI->uses())
538           Uses.insert(&Us);
539     }
540   }
541 }
542 
543 /// Use the must-be-executed-context around \p I to add information into \p S.
544 /// The AAType class is required to have `followUseInMBEC` method with the
545 /// following signature and behaviour:
546 ///
547 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
548 /// U - Underlying use.
549 /// I - The user of the \p U.
550 /// Returns true if the value should be tracked transitively.
551 ///
552 template <class AAType, typename StateType = typename AAType::StateType>
553 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
554                              Instruction &CtxI) {
555 
556   // Container for (transitive) uses of the associated value.
557   SetVector<const Use *> Uses;
558   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
559     Uses.insert(&U);
560 
561   MustBeExecutedContextExplorer &Explorer =
562       A.getInfoCache().getMustBeExecutedContextExplorer();
563 
564   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
565 
566   if (S.isAtFixpoint())
567     return;
568 
569   SmallVector<const BranchInst *, 4> BrInsts;
570   auto Pred = [&](const Instruction *I) {
571     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
572       if (Br->isConditional())
573         BrInsts.push_back(Br);
574     return true;
575   };
576 
577   // Here, accumulate conditional branch instructions in the context. We
578   // explore the child paths and collect the known states. The disjunction of
579   // those states can be merged to its own state. Let ParentState_i be a state
580   // to indicate the known information for an i-th branch instruction in the
581   // context. ChildStates are created for its successors respectively.
582   //
583   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
584   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
585   //      ...
586   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
587   //
588   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
589   //
590   // FIXME: Currently, recursive branches are not handled. For example, we
591   // can't deduce that ptr must be dereferenced in below function.
592   //
593   // void f(int a, int c, int *ptr) {
594   //    if(a)
595   //      if (b) {
596   //        *ptr = 0;
597   //      } else {
598   //        *ptr = 1;
599   //      }
600   //    else {
601   //      if (b) {
602   //        *ptr = 0;
603   //      } else {
604   //        *ptr = 1;
605   //      }
606   //    }
607   // }
608 
609   Explorer.checkForAllContext(&CtxI, Pred);
610   for (const BranchInst *Br : BrInsts) {
611     StateType ParentState;
612 
613     // The known state of the parent state is a conjunction of children's
614     // known states so it is initialized with a best state.
615     ParentState.indicateOptimisticFixpoint();
616 
617     for (const BasicBlock *BB : Br->successors()) {
618       StateType ChildState;
619 
620       size_t BeforeSize = Uses.size();
621       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
622 
623       // Erase uses which only appear in the child.
624       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
625         It = Uses.erase(It);
626 
627       ParentState &= ChildState;
628     }
629 
630     // Use only known state.
631     S += ParentState;
632   }
633 }
634 
635 /// -----------------------NoUnwind Function Attribute--------------------------
636 
637 struct AANoUnwindImpl : AANoUnwind {
638   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
639 
640   const std::string getAsStr() const override {
641     return getAssumed() ? "nounwind" : "may-unwind";
642   }
643 
644   /// See AbstractAttribute::updateImpl(...).
645   ChangeStatus updateImpl(Attributor &A) override {
646     auto Opcodes = {
647         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
648         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
649         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
650 
651     auto CheckForNoUnwind = [&](Instruction &I) {
652       if (!I.mayThrow())
653         return true;
654 
655       if (const auto *CB = dyn_cast<CallBase>(&I)) {
656         const auto &NoUnwindAA =
657             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
658         return NoUnwindAA.isAssumedNoUnwind();
659       }
660       return false;
661     };
662 
663     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
664       return indicatePessimisticFixpoint();
665 
666     return ChangeStatus::UNCHANGED;
667   }
668 };
669 
670 struct AANoUnwindFunction final : public AANoUnwindImpl {
671   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
672       : AANoUnwindImpl(IRP, A) {}
673 
674   /// See AbstractAttribute::trackStatistics()
675   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
676 };
677 
678 /// NoUnwind attribute deduction for a call sites.
679 struct AANoUnwindCallSite final : AANoUnwindImpl {
680   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
681       : AANoUnwindImpl(IRP, A) {}
682 
683   /// See AbstractAttribute::initialize(...).
684   void initialize(Attributor &A) override {
685     AANoUnwindImpl::initialize(A);
686     Function *F = getAssociatedFunction();
687     if (!F)
688       indicatePessimisticFixpoint();
689   }
690 
691   /// See AbstractAttribute::updateImpl(...).
692   ChangeStatus updateImpl(Attributor &A) override {
693     // TODO: Once we have call site specific value information we can provide
694     //       call site specific liveness information and then it makes
695     //       sense to specialize attributes for call sites arguments instead of
696     //       redirecting requests to the callee argument.
697     Function *F = getAssociatedFunction();
698     const IRPosition &FnPos = IRPosition::function(*F);
699     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
700     return clampStateAndIndicateChange(
701         getState(),
702         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
703   }
704 
705   /// See AbstractAttribute::trackStatistics()
706   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
707 };
708 
709 /// --------------------- Function Return Values -------------------------------
710 
711 /// "Attribute" that collects all potential returned values and the return
712 /// instructions that they arise from.
713 ///
714 /// If there is a unique returned value R, the manifest method will:
715 ///   - mark R with the "returned" attribute, if R is an argument.
716 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
717 
718   /// Mapping of values potentially returned by the associated function to the
719   /// return instructions that might return them.
720   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
721 
722   /// Mapping to remember the number of returned values for a call site such
723   /// that we can avoid updates if nothing changed.
724   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
725 
726   /// Set of unresolved calls returned by the associated function.
727   SmallSetVector<CallBase *, 4> UnresolvedCalls;
728 
729   /// State flags
730   ///
731   ///{
732   bool IsFixed = false;
733   bool IsValidState = true;
734   ///}
735 
736 public:
737   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
738       : AAReturnedValues(IRP, A) {}
739 
740   /// See AbstractAttribute::initialize(...).
741   void initialize(Attributor &A) override {
742     // Reset the state.
743     IsFixed = false;
744     IsValidState = true;
745     ReturnedValues.clear();
746 
747     Function *F = getAssociatedFunction();
748     if (!F) {
749       indicatePessimisticFixpoint();
750       return;
751     }
752     assert(!F->getReturnType()->isVoidTy() &&
753            "Did not expect a void return type!");
754 
755     // The map from instruction opcodes to those instructions in the function.
756     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
757 
758     // Look through all arguments, if one is marked as returned we are done.
759     for (Argument &Arg : F->args()) {
760       if (Arg.hasReturnedAttr()) {
761         auto &ReturnInstSet = ReturnedValues[&Arg];
762         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
763           for (Instruction *RI : *Insts)
764             ReturnInstSet.insert(cast<ReturnInst>(RI));
765 
766         indicateOptimisticFixpoint();
767         return;
768       }
769     }
770 
771     if (!A.isFunctionIPOAmendable(*F))
772       indicatePessimisticFixpoint();
773   }
774 
775   /// See AbstractAttribute::manifest(...).
776   ChangeStatus manifest(Attributor &A) override;
777 
778   /// See AbstractAttribute::getState(...).
779   AbstractState &getState() override { return *this; }
780 
781   /// See AbstractAttribute::getState(...).
782   const AbstractState &getState() const override { return *this; }
783 
784   /// See AbstractAttribute::updateImpl(Attributor &A).
785   ChangeStatus updateImpl(Attributor &A) override;
786 
787   llvm::iterator_range<iterator> returned_values() override {
788     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
789   }
790 
791   llvm::iterator_range<const_iterator> returned_values() const override {
792     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
793   }
794 
795   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
796     return UnresolvedCalls;
797   }
798 
799   /// Return the number of potential return values, -1 if unknown.
800   size_t getNumReturnValues() const override {
801     return isValidState() ? ReturnedValues.size() : -1;
802   }
803 
804   /// Return an assumed unique return value if a single candidate is found. If
805   /// there cannot be one, return a nullptr. If it is not clear yet, return the
806   /// Optional::NoneType.
807   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
808 
809   /// See AbstractState::checkForAllReturnedValues(...).
810   bool checkForAllReturnedValuesAndReturnInsts(
811       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
812       const override;
813 
814   /// Pretty print the attribute similar to the IR representation.
815   const std::string getAsStr() const override;
816 
817   /// See AbstractState::isAtFixpoint().
818   bool isAtFixpoint() const override { return IsFixed; }
819 
820   /// See AbstractState::isValidState().
821   bool isValidState() const override { return IsValidState; }
822 
823   /// See AbstractState::indicateOptimisticFixpoint(...).
824   ChangeStatus indicateOptimisticFixpoint() override {
825     IsFixed = true;
826     return ChangeStatus::UNCHANGED;
827   }
828 
829   ChangeStatus indicatePessimisticFixpoint() override {
830     IsFixed = true;
831     IsValidState = false;
832     return ChangeStatus::CHANGED;
833   }
834 };
835 
836 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
837   ChangeStatus Changed = ChangeStatus::UNCHANGED;
838 
839   // Bookkeeping.
840   assert(isValidState());
841   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
842                   "Number of function with known return values");
843 
844   // Check if we have an assumed unique return value that we could manifest.
845   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
846 
847   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
848     return Changed;
849 
850   // Bookkeeping.
851   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
852                   "Number of function with unique return");
853 
854   // Callback to replace the uses of CB with the constant C.
855   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
856     if (CB.use_empty())
857       return ChangeStatus::UNCHANGED;
858     if (A.changeValueAfterManifest(CB, C))
859       return ChangeStatus::CHANGED;
860     return ChangeStatus::UNCHANGED;
861   };
862 
863   // If the assumed unique return value is an argument, annotate it.
864   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
865     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
866             getAssociatedFunction()->getReturnType())) {
867       getIRPosition() = IRPosition::argument(*UniqueRVArg);
868       Changed = IRAttribute::manifest(A);
869     }
870   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
871     // We can replace the returned value with the unique returned constant.
872     Value &AnchorValue = getAnchorValue();
873     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
874       for (const Use &U : F->uses())
875         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
876           if (CB->isCallee(&U)) {
877             Constant *RVCCast =
878                 CB->getType() == RVC->getType()
879                     ? RVC
880                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
881             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
882           }
883     } else {
884       assert(isa<CallBase>(AnchorValue) &&
885              "Expcected a function or call base anchor!");
886       Constant *RVCCast =
887           AnchorValue.getType() == RVC->getType()
888               ? RVC
889               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
890       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
891     }
892     if (Changed == ChangeStatus::CHANGED)
893       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
894                       "Number of function returns replaced by constant return");
895   }
896 
897   return Changed;
898 }
899 
900 const std::string AAReturnedValuesImpl::getAsStr() const {
901   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
902          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
903          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
904 }
905 
906 Optional<Value *>
907 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
908   // If checkForAllReturnedValues provides a unique value, ignoring potential
909   // undef values that can also be present, it is assumed to be the actual
910   // return value and forwarded to the caller of this method. If there are
911   // multiple, a nullptr is returned indicating there cannot be a unique
912   // returned value.
913   Optional<Value *> UniqueRV;
914 
915   auto Pred = [&](Value &RV) -> bool {
916     // If we found a second returned value and neither the current nor the saved
917     // one is an undef, there is no unique returned value. Undefs are special
918     // since we can pretend they have any value.
919     if (UniqueRV.hasValue() && UniqueRV != &RV &&
920         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
921       UniqueRV = nullptr;
922       return false;
923     }
924 
925     // Do not overwrite a value with an undef.
926     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
927       UniqueRV = &RV;
928 
929     return true;
930   };
931 
932   if (!A.checkForAllReturnedValues(Pred, *this))
933     UniqueRV = nullptr;
934 
935   return UniqueRV;
936 }
937 
938 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
939     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
940     const {
941   if (!isValidState())
942     return false;
943 
944   // Check all returned values but ignore call sites as long as we have not
945   // encountered an overdefined one during an update.
946   for (auto &It : ReturnedValues) {
947     Value *RV = It.first;
948 
949     CallBase *CB = dyn_cast<CallBase>(RV);
950     if (CB && !UnresolvedCalls.count(CB))
951       continue;
952 
953     if (!Pred(*RV, It.second))
954       return false;
955   }
956 
957   return true;
958 }
959 
960 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
961   size_t NumUnresolvedCalls = UnresolvedCalls.size();
962   bool Changed = false;
963 
964   // State used in the value traversals starting in returned values.
965   struct RVState {
966     // The map in which we collect return values -> return instrs.
967     decltype(ReturnedValues) &RetValsMap;
968     // The flag to indicate a change.
969     bool &Changed;
970     // The return instrs we come from.
971     SmallSetVector<ReturnInst *, 4> RetInsts;
972   };
973 
974   // Callback for a leaf value returned by the associated function.
975   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
976                          bool) -> bool {
977     auto Size = RVS.RetValsMap[&Val].size();
978     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
979     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
980     RVS.Changed |= Inserted;
981     LLVM_DEBUG({
982       if (Inserted)
983         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
984                << " => " << RVS.RetInsts.size() << "\n";
985     });
986     return true;
987   };
988 
989   // Helper method to invoke the generic value traversal.
990   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
991                                 const Instruction *CtxI) {
992     IRPosition RetValPos = IRPosition::value(RV);
993     return genericValueTraversal<AAReturnedValues, RVState>(
994         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
995         /* UseValueSimplify */ false);
996   };
997 
998   // Callback for all "return intructions" live in the associated function.
999   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1000     ReturnInst &Ret = cast<ReturnInst>(I);
1001     RVState RVS({ReturnedValues, Changed, {}});
1002     RVS.RetInsts.insert(&Ret);
1003     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1004   };
1005 
1006   // Start by discovering returned values from all live returned instructions in
1007   // the associated function.
1008   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1009     return indicatePessimisticFixpoint();
1010 
1011   // Once returned values "directly" present in the code are handled we try to
1012   // resolve returned calls.
1013   decltype(ReturnedValues) NewRVsMap;
1014   for (auto &It : ReturnedValues) {
1015     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
1016                       << " by #" << It.second.size() << " RIs\n");
1017     CallBase *CB = dyn_cast<CallBase>(It.first);
1018     if (!CB || UnresolvedCalls.count(CB))
1019       continue;
1020 
1021     if (!CB->getCalledFunction()) {
1022       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1023                         << "\n");
1024       UnresolvedCalls.insert(CB);
1025       continue;
1026     }
1027 
1028     // TODO: use the function scope once we have call site AAReturnedValues.
1029     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1030         *this, IRPosition::function(*CB->getCalledFunction()));
1031     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1032                       << RetValAA << "\n");
1033 
1034     // Skip dead ends, thus if we do not know anything about the returned
1035     // call we mark it as unresolved and it will stay that way.
1036     if (!RetValAA.getState().isValidState()) {
1037       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1038                         << "\n");
1039       UnresolvedCalls.insert(CB);
1040       continue;
1041     }
1042 
1043     // Do not try to learn partial information. If the callee has unresolved
1044     // return values we will treat the call as unresolved/opaque.
1045     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1046     if (!RetValAAUnresolvedCalls.empty()) {
1047       UnresolvedCalls.insert(CB);
1048       continue;
1049     }
1050 
1051     // Now check if we can track transitively returned values. If possible, thus
1052     // if all return value can be represented in the current scope, do so.
1053     bool Unresolved = false;
1054     for (auto &RetValAAIt : RetValAA.returned_values()) {
1055       Value *RetVal = RetValAAIt.first;
1056       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1057           isa<Constant>(RetVal))
1058         continue;
1059       // Anything that did not fit in the above categories cannot be resolved,
1060       // mark the call as unresolved.
1061       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1062                            "cannot be translated: "
1063                         << *RetVal << "\n");
1064       UnresolvedCalls.insert(CB);
1065       Unresolved = true;
1066       break;
1067     }
1068 
1069     if (Unresolved)
1070       continue;
1071 
1072     // Now track transitively returned values.
1073     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1074     if (NumRetAA == RetValAA.getNumReturnValues()) {
1075       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1076                            "changed since it was seen last\n");
1077       continue;
1078     }
1079     NumRetAA = RetValAA.getNumReturnValues();
1080 
1081     for (auto &RetValAAIt : RetValAA.returned_values()) {
1082       Value *RetVal = RetValAAIt.first;
1083       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1084         // Arguments are mapped to call site operands and we begin the traversal
1085         // again.
1086         bool Unused = false;
1087         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1088         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1089         continue;
1090       } else if (isa<CallBase>(RetVal)) {
1091         // Call sites are resolved by the callee attribute over time, no need to
1092         // do anything for us.
1093         continue;
1094       } else if (isa<Constant>(RetVal)) {
1095         // Constants are valid everywhere, we can simply take them.
1096         NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
1097         continue;
1098       }
1099     }
1100   }
1101 
1102   // To avoid modifications to the ReturnedValues map while we iterate over it
1103   // we kept record of potential new entries in a copy map, NewRVsMap.
1104   for (auto &It : NewRVsMap) {
1105     assert(!It.second.empty() && "Entry does not add anything.");
1106     auto &ReturnInsts = ReturnedValues[It.first];
1107     for (ReturnInst *RI : It.second)
1108       if (ReturnInsts.insert(RI)) {
1109         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1110                           << *It.first << " => " << *RI << "\n");
1111         Changed = true;
1112       }
1113   }
1114 
1115   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1116   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1117 }
1118 
1119 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1120   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1121       : AAReturnedValuesImpl(IRP, A) {}
1122 
1123   /// See AbstractAttribute::trackStatistics()
1124   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1125 };
1126 
1127 /// Returned values information for a call sites.
1128 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1129   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1130       : AAReturnedValuesImpl(IRP, A) {}
1131 
1132   /// See AbstractAttribute::initialize(...).
1133   void initialize(Attributor &A) override {
1134     // TODO: Once we have call site specific value information we can provide
1135     //       call site specific liveness information and then it makes
1136     //       sense to specialize attributes for call sites instead of
1137     //       redirecting requests to the callee.
1138     llvm_unreachable("Abstract attributes for returned values are not "
1139                      "supported for call sites yet!");
1140   }
1141 
1142   /// See AbstractAttribute::updateImpl(...).
1143   ChangeStatus updateImpl(Attributor &A) override {
1144     return indicatePessimisticFixpoint();
1145   }
1146 
1147   /// See AbstractAttribute::trackStatistics()
1148   void trackStatistics() const override {}
1149 };
1150 
1151 /// ------------------------ NoSync Function Attribute -------------------------
1152 
1153 struct AANoSyncImpl : AANoSync {
1154   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1155 
1156   const std::string getAsStr() const override {
1157     return getAssumed() ? "nosync" : "may-sync";
1158   }
1159 
1160   /// See AbstractAttribute::updateImpl(...).
1161   ChangeStatus updateImpl(Attributor &A) override;
1162 
1163   /// Helper function used to determine whether an instruction is non-relaxed
1164   /// atomic. In other words, if an atomic instruction does not have unordered
1165   /// or monotonic ordering
1166   static bool isNonRelaxedAtomic(Instruction *I);
1167 
1168   /// Helper function used to determine whether an instruction is volatile.
1169   static bool isVolatile(Instruction *I);
1170 
1171   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1172   /// memset).
1173   static bool isNoSyncIntrinsic(Instruction *I);
1174 };
1175 
1176 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1177   if (!I->isAtomic())
1178     return false;
1179 
1180   AtomicOrdering Ordering;
1181   switch (I->getOpcode()) {
1182   case Instruction::AtomicRMW:
1183     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1184     break;
1185   case Instruction::Store:
1186     Ordering = cast<StoreInst>(I)->getOrdering();
1187     break;
1188   case Instruction::Load:
1189     Ordering = cast<LoadInst>(I)->getOrdering();
1190     break;
1191   case Instruction::Fence: {
1192     auto *FI = cast<FenceInst>(I);
1193     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1194       return false;
1195     Ordering = FI->getOrdering();
1196     break;
1197   }
1198   case Instruction::AtomicCmpXchg: {
1199     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1200     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1201     // Only if both are relaxed, than it can be treated as relaxed.
1202     // Otherwise it is non-relaxed.
1203     if (Success != AtomicOrdering::Unordered &&
1204         Success != AtomicOrdering::Monotonic)
1205       return true;
1206     if (Failure != AtomicOrdering::Unordered &&
1207         Failure != AtomicOrdering::Monotonic)
1208       return true;
1209     return false;
1210   }
1211   default:
1212     llvm_unreachable(
1213         "New atomic operations need to be known in the attributor.");
1214   }
1215 
1216   // Relaxed.
1217   if (Ordering == AtomicOrdering::Unordered ||
1218       Ordering == AtomicOrdering::Monotonic)
1219     return false;
1220   return true;
1221 }
1222 
1223 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1224 /// FIXME: We should ipmrove the handling of intrinsics.
1225 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1226   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1227     switch (II->getIntrinsicID()) {
1228     /// Element wise atomic memory intrinsics are can only be unordered,
1229     /// therefore nosync.
1230     case Intrinsic::memset_element_unordered_atomic:
1231     case Intrinsic::memmove_element_unordered_atomic:
1232     case Intrinsic::memcpy_element_unordered_atomic:
1233       return true;
1234     case Intrinsic::memset:
1235     case Intrinsic::memmove:
1236     case Intrinsic::memcpy:
1237       if (!cast<MemIntrinsic>(II)->isVolatile())
1238         return true;
1239       return false;
1240     default:
1241       return false;
1242     }
1243   }
1244   return false;
1245 }
1246 
1247 bool AANoSyncImpl::isVolatile(Instruction *I) {
1248   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1249 
1250   switch (I->getOpcode()) {
1251   case Instruction::AtomicRMW:
1252     return cast<AtomicRMWInst>(I)->isVolatile();
1253   case Instruction::Store:
1254     return cast<StoreInst>(I)->isVolatile();
1255   case Instruction::Load:
1256     return cast<LoadInst>(I)->isVolatile();
1257   case Instruction::AtomicCmpXchg:
1258     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1259   default:
1260     return false;
1261   }
1262 }
1263 
1264 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1265 
1266   auto CheckRWInstForNoSync = [&](Instruction &I) {
1267     /// We are looking for volatile instructions or Non-Relaxed atomics.
1268     /// FIXME: We should improve the handling of intrinsics.
1269 
1270     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1271       return true;
1272 
1273     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1274       if (CB->hasFnAttr(Attribute::NoSync))
1275         return true;
1276 
1277       const auto &NoSyncAA =
1278           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
1279       if (NoSyncAA.isAssumedNoSync())
1280         return true;
1281       return false;
1282     }
1283 
1284     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1285       return true;
1286 
1287     return false;
1288   };
1289 
1290   auto CheckForNoSync = [&](Instruction &I) {
1291     // At this point we handled all read/write effects and they are all
1292     // nosync, so they can be skipped.
1293     if (I.mayReadOrWriteMemory())
1294       return true;
1295 
1296     // non-convergent and readnone imply nosync.
1297     return !cast<CallBase>(I).isConvergent();
1298   };
1299 
1300   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1301       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1302     return indicatePessimisticFixpoint();
1303 
1304   return ChangeStatus::UNCHANGED;
1305 }
1306 
1307 struct AANoSyncFunction final : public AANoSyncImpl {
1308   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1309       : AANoSyncImpl(IRP, A) {}
1310 
1311   /// See AbstractAttribute::trackStatistics()
1312   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1313 };
1314 
1315 /// NoSync attribute deduction for a call sites.
1316 struct AANoSyncCallSite final : AANoSyncImpl {
1317   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1318       : AANoSyncImpl(IRP, A) {}
1319 
1320   /// See AbstractAttribute::initialize(...).
1321   void initialize(Attributor &A) override {
1322     AANoSyncImpl::initialize(A);
1323     Function *F = getAssociatedFunction();
1324     if (!F)
1325       indicatePessimisticFixpoint();
1326   }
1327 
1328   /// See AbstractAttribute::updateImpl(...).
1329   ChangeStatus updateImpl(Attributor &A) override {
1330     // TODO: Once we have call site specific value information we can provide
1331     //       call site specific liveness information and then it makes
1332     //       sense to specialize attributes for call sites arguments instead of
1333     //       redirecting requests to the callee argument.
1334     Function *F = getAssociatedFunction();
1335     const IRPosition &FnPos = IRPosition::function(*F);
1336     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1337     return clampStateAndIndicateChange(
1338         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1339   }
1340 
1341   /// See AbstractAttribute::trackStatistics()
1342   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1343 };
1344 
1345 /// ------------------------ No-Free Attributes ----------------------------
1346 
1347 struct AANoFreeImpl : public AANoFree {
1348   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1349 
1350   /// See AbstractAttribute::updateImpl(...).
1351   ChangeStatus updateImpl(Attributor &A) override {
1352     auto CheckForNoFree = [&](Instruction &I) {
1353       const auto &CB = cast<CallBase>(I);
1354       if (CB.hasFnAttr(Attribute::NoFree))
1355         return true;
1356 
1357       const auto &NoFreeAA =
1358           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1359       return NoFreeAA.isAssumedNoFree();
1360     };
1361 
1362     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1363       return indicatePessimisticFixpoint();
1364     return ChangeStatus::UNCHANGED;
1365   }
1366 
1367   /// See AbstractAttribute::getAsStr().
1368   const std::string getAsStr() const override {
1369     return getAssumed() ? "nofree" : "may-free";
1370   }
1371 };
1372 
1373 struct AANoFreeFunction final : public AANoFreeImpl {
1374   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1375       : AANoFreeImpl(IRP, A) {}
1376 
1377   /// See AbstractAttribute::trackStatistics()
1378   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1379 };
1380 
1381 /// NoFree attribute deduction for a call sites.
1382 struct AANoFreeCallSite final : AANoFreeImpl {
1383   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1384       : AANoFreeImpl(IRP, A) {}
1385 
1386   /// See AbstractAttribute::initialize(...).
1387   void initialize(Attributor &A) override {
1388     AANoFreeImpl::initialize(A);
1389     Function *F = getAssociatedFunction();
1390     if (!F)
1391       indicatePessimisticFixpoint();
1392   }
1393 
1394   /// See AbstractAttribute::updateImpl(...).
1395   ChangeStatus updateImpl(Attributor &A) override {
1396     // TODO: Once we have call site specific value information we can provide
1397     //       call site specific liveness information and then it makes
1398     //       sense to specialize attributes for call sites arguments instead of
1399     //       redirecting requests to the callee argument.
1400     Function *F = getAssociatedFunction();
1401     const IRPosition &FnPos = IRPosition::function(*F);
1402     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1403     return clampStateAndIndicateChange(
1404         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1405   }
1406 
1407   /// See AbstractAttribute::trackStatistics()
1408   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1409 };
1410 
1411 /// NoFree attribute for floating values.
1412 struct AANoFreeFloating : AANoFreeImpl {
1413   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1414       : AANoFreeImpl(IRP, A) {}
1415 
1416   /// See AbstractAttribute::trackStatistics()
1417   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1418 
1419   /// See Abstract Attribute::updateImpl(...).
1420   ChangeStatus updateImpl(Attributor &A) override {
1421     const IRPosition &IRP = getIRPosition();
1422 
1423     const auto &NoFreeAA =
1424         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1425     if (NoFreeAA.isAssumedNoFree())
1426       return ChangeStatus::UNCHANGED;
1427 
1428     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1429     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1430       Instruction *UserI = cast<Instruction>(U.getUser());
1431       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1432         if (CB->isBundleOperand(&U))
1433           return false;
1434         if (!CB->isArgOperand(&U))
1435           return true;
1436         unsigned ArgNo = CB->getArgOperandNo(&U);
1437 
1438         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1439             *this, IRPosition::callsite_argument(*CB, ArgNo));
1440         return NoFreeArg.isAssumedNoFree();
1441       }
1442 
1443       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1444           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1445         Follow = true;
1446         return true;
1447       }
1448       if (isa<ReturnInst>(UserI))
1449         return true;
1450 
1451       // Unknown user.
1452       return false;
1453     };
1454     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1455       return indicatePessimisticFixpoint();
1456 
1457     return ChangeStatus::UNCHANGED;
1458   }
1459 };
1460 
1461 /// NoFree attribute for a call site argument.
1462 struct AANoFreeArgument final : AANoFreeFloating {
1463   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1464       : AANoFreeFloating(IRP, A) {}
1465 
1466   /// See AbstractAttribute::trackStatistics()
1467   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1468 };
1469 
1470 /// NoFree attribute for call site arguments.
1471 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1472   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1473       : AANoFreeFloating(IRP, A) {}
1474 
1475   /// See AbstractAttribute::updateImpl(...).
1476   ChangeStatus updateImpl(Attributor &A) override {
1477     // TODO: Once we have call site specific value information we can provide
1478     //       call site specific liveness information and then it makes
1479     //       sense to specialize attributes for call sites arguments instead of
1480     //       redirecting requests to the callee argument.
1481     Argument *Arg = getAssociatedArgument();
1482     if (!Arg)
1483       return indicatePessimisticFixpoint();
1484     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1485     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1486     return clampStateAndIndicateChange(
1487         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1488   }
1489 
1490   /// See AbstractAttribute::trackStatistics()
1491   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1492 };
1493 
1494 /// NoFree attribute for function return value.
1495 struct AANoFreeReturned final : AANoFreeFloating {
1496   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1497       : AANoFreeFloating(IRP, A) {
1498     llvm_unreachable("NoFree is not applicable to function returns!");
1499   }
1500 
1501   /// See AbstractAttribute::initialize(...).
1502   void initialize(Attributor &A) override {
1503     llvm_unreachable("NoFree is not applicable to function returns!");
1504   }
1505 
1506   /// See AbstractAttribute::updateImpl(...).
1507   ChangeStatus updateImpl(Attributor &A) override {
1508     llvm_unreachable("NoFree is not applicable to function returns!");
1509   }
1510 
1511   /// See AbstractAttribute::trackStatistics()
1512   void trackStatistics() const override {}
1513 };
1514 
1515 /// NoFree attribute deduction for a call site return value.
1516 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1517   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1518       : AANoFreeFloating(IRP, A) {}
1519 
1520   ChangeStatus manifest(Attributor &A) override {
1521     return ChangeStatus::UNCHANGED;
1522   }
1523   /// See AbstractAttribute::trackStatistics()
1524   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1525 };
1526 
1527 /// ------------------------ NonNull Argument Attribute ------------------------
1528 static int64_t getKnownNonNullAndDerefBytesForUse(
1529     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1530     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1531   TrackUse = false;
1532 
1533   const Value *UseV = U->get();
1534   if (!UseV->getType()->isPointerTy())
1535     return 0;
1536 
1537   Type *PtrTy = UseV->getType();
1538   const Function *F = I->getFunction();
1539   bool NullPointerIsDefined =
1540       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1541   const DataLayout &DL = A.getInfoCache().getDL();
1542   if (const auto *CB = dyn_cast<CallBase>(I)) {
1543     if (CB->isBundleOperand(U)) {
1544       if (RetainedKnowledge RK = getKnowledgeFromUse(
1545               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1546         IsNonNull |=
1547             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1548         return RK.ArgValue;
1549       }
1550       return 0;
1551     }
1552 
1553     if (CB->isCallee(U)) {
1554       IsNonNull |= !NullPointerIsDefined;
1555       return 0;
1556     }
1557 
1558     unsigned ArgNo = CB->getArgOperandNo(U);
1559     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1560     // As long as we only use known information there is no need to track
1561     // dependences here.
1562     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1563                                                   /* TrackDependence */ false);
1564     IsNonNull |= DerefAA.isKnownNonNull();
1565     return DerefAA.getKnownDereferenceableBytes();
1566   }
1567 
1568   // We need to follow common pointer manipulation uses to the accesses they
1569   // feed into. We can try to be smart to avoid looking through things we do not
1570   // like for now, e.g., non-inbounds GEPs.
1571   if (isa<CastInst>(I)) {
1572     TrackUse = true;
1573     return 0;
1574   }
1575   if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1576     if (GEP->hasAllConstantIndices()) {
1577       TrackUse = true;
1578       return 0;
1579     }
1580 
1581   int64_t Offset;
1582   if (const Value *Base = getBasePointerOfAccessPointerOperand(I, Offset, DL)) {
1583     if (Base == &AssociatedValue &&
1584         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1585       int64_t DerefBytes =
1586           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1587 
1588       IsNonNull |= !NullPointerIsDefined;
1589       return std::max(int64_t(0), DerefBytes);
1590     }
1591   }
1592 
1593   /// Corner case when an offset is 0.
1594   if (const Value *Base = getBasePointerOfAccessPointerOperand(
1595           I, Offset, DL, /*AllowNonInbounds*/ true)) {
1596     if (Offset == 0 && Base == &AssociatedValue &&
1597         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1598       int64_t DerefBytes =
1599           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1600       IsNonNull |= !NullPointerIsDefined;
1601       return std::max(int64_t(0), DerefBytes);
1602     }
1603   }
1604 
1605   return 0;
1606 }
1607 
1608 struct AANonNullImpl : AANonNull {
1609   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1610       : AANonNull(IRP, A),
1611         NullIsDefined(NullPointerIsDefined(
1612             getAnchorScope(),
1613             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1614 
1615   /// See AbstractAttribute::initialize(...).
1616   void initialize(Attributor &A) override {
1617     Value &V = getAssociatedValue();
1618     if (!NullIsDefined &&
1619         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1620                 /* IgnoreSubsumingPositions */ false, &A))
1621       indicateOptimisticFixpoint();
1622     else if (isa<ConstantPointerNull>(V))
1623       indicatePessimisticFixpoint();
1624     else
1625       AANonNull::initialize(A);
1626 
1627     bool CanBeNull = true;
1628     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull))
1629       if (!CanBeNull)
1630         indicateOptimisticFixpoint();
1631 
1632     if (!getState().isAtFixpoint())
1633       if (Instruction *CtxI = getCtxI())
1634         followUsesInMBEC(*this, A, getState(), *CtxI);
1635   }
1636 
1637   /// See followUsesInMBEC
1638   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1639                        AANonNull::StateType &State) {
1640     bool IsNonNull = false;
1641     bool TrackUse = false;
1642     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1643                                        IsNonNull, TrackUse);
1644     State.setKnown(IsNonNull);
1645     return TrackUse;
1646   }
1647 
1648   /// See AbstractAttribute::getAsStr().
1649   const std::string getAsStr() const override {
1650     return getAssumed() ? "nonnull" : "may-null";
1651   }
1652 
1653   /// Flag to determine if the underlying value can be null and still allow
1654   /// valid accesses.
1655   const bool NullIsDefined;
1656 };
1657 
1658 /// NonNull attribute for a floating value.
1659 struct AANonNullFloating : public AANonNullImpl {
1660   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1661       : AANonNullImpl(IRP, A) {}
1662 
1663   /// See AbstractAttribute::updateImpl(...).
1664   ChangeStatus updateImpl(Attributor &A) override {
1665     if (!NullIsDefined) {
1666       const auto &DerefAA =
1667           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1668       if (DerefAA.getAssumedDereferenceableBytes())
1669         return ChangeStatus::UNCHANGED;
1670     }
1671 
1672     const DataLayout &DL = A.getDataLayout();
1673 
1674     DominatorTree *DT = nullptr;
1675     AssumptionCache *AC = nullptr;
1676     InformationCache &InfoCache = A.getInfoCache();
1677     if (const Function *Fn = getAnchorScope()) {
1678       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1679       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1680     }
1681 
1682     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1683                             AANonNull::StateType &T, bool Stripped) -> bool {
1684       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1685       if (!Stripped && this == &AA) {
1686         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1687           T.indicatePessimisticFixpoint();
1688       } else {
1689         // Use abstract attribute information.
1690         const AANonNull::StateType &NS =
1691             static_cast<const AANonNull::StateType &>(AA.getState());
1692         T ^= NS;
1693       }
1694       return T.isValidState();
1695     };
1696 
1697     StateType T;
1698     if (!genericValueTraversal<AANonNull, StateType>(
1699             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1700       return indicatePessimisticFixpoint();
1701 
1702     return clampStateAndIndicateChange(getState(), T);
1703   }
1704 
1705   /// See AbstractAttribute::trackStatistics()
1706   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1707 };
1708 
1709 /// NonNull attribute for function return value.
1710 struct AANonNullReturned final
1711     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1712   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1713       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {}
1714 
1715   /// See AbstractAttribute::trackStatistics()
1716   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1717 };
1718 
1719 /// NonNull attribute for function argument.
1720 struct AANonNullArgument final
1721     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1722   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1723       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1724 
1725   /// See AbstractAttribute::trackStatistics()
1726   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1727 };
1728 
1729 struct AANonNullCallSiteArgument final : AANonNullFloating {
1730   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1731       : AANonNullFloating(IRP, A) {}
1732 
1733   /// See AbstractAttribute::trackStatistics()
1734   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1735 };
1736 
1737 /// NonNull attribute for a call site return position.
1738 struct AANonNullCallSiteReturned final
1739     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1740   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1741       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1742 
1743   /// See AbstractAttribute::trackStatistics()
1744   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1745 };
1746 
1747 /// ------------------------ No-Recurse Attributes ----------------------------
1748 
1749 struct AANoRecurseImpl : public AANoRecurse {
1750   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1751 
1752   /// See AbstractAttribute::getAsStr()
1753   const std::string getAsStr() const override {
1754     return getAssumed() ? "norecurse" : "may-recurse";
1755   }
1756 };
1757 
1758 struct AANoRecurseFunction final : AANoRecurseImpl {
1759   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1760       : AANoRecurseImpl(IRP, A) {}
1761 
1762   /// See AbstractAttribute::initialize(...).
1763   void initialize(Attributor &A) override {
1764     AANoRecurseImpl::initialize(A);
1765     if (const Function *F = getAnchorScope())
1766       if (A.getInfoCache().getSccSize(*F) != 1)
1767         indicatePessimisticFixpoint();
1768   }
1769 
1770   /// See AbstractAttribute::updateImpl(...).
1771   ChangeStatus updateImpl(Attributor &A) override {
1772 
1773     // If all live call sites are known to be no-recurse, we are as well.
1774     auto CallSitePred = [&](AbstractCallSite ACS) {
1775       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1776           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1777           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1778       return NoRecurseAA.isKnownNoRecurse();
1779     };
1780     bool AllCallSitesKnown;
1781     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1782       // If we know all call sites and all are known no-recurse, we are done.
1783       // If all known call sites, which might not be all that exist, are known
1784       // to be no-recurse, we are not done but we can continue to assume
1785       // no-recurse. If one of the call sites we have not visited will become
1786       // live, another update is triggered.
1787       if (AllCallSitesKnown)
1788         indicateOptimisticFixpoint();
1789       return ChangeStatus::UNCHANGED;
1790     }
1791 
1792     // If the above check does not hold anymore we look at the calls.
1793     auto CheckForNoRecurse = [&](Instruction &I) {
1794       const auto &CB = cast<CallBase>(I);
1795       if (CB.hasFnAttr(Attribute::NoRecurse))
1796         return true;
1797 
1798       const auto &NoRecurseAA =
1799           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1800       if (!NoRecurseAA.isAssumedNoRecurse())
1801         return false;
1802 
1803       // Recursion to the same function
1804       if (CB.getCalledFunction() == getAnchorScope())
1805         return false;
1806 
1807       return true;
1808     };
1809 
1810     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1811       return indicatePessimisticFixpoint();
1812     return ChangeStatus::UNCHANGED;
1813   }
1814 
1815   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1816 };
1817 
1818 /// NoRecurse attribute deduction for a call sites.
1819 struct AANoRecurseCallSite final : AANoRecurseImpl {
1820   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1821       : AANoRecurseImpl(IRP, A) {}
1822 
1823   /// See AbstractAttribute::initialize(...).
1824   void initialize(Attributor &A) override {
1825     AANoRecurseImpl::initialize(A);
1826     Function *F = getAssociatedFunction();
1827     if (!F)
1828       indicatePessimisticFixpoint();
1829   }
1830 
1831   /// See AbstractAttribute::updateImpl(...).
1832   ChangeStatus updateImpl(Attributor &A) override {
1833     // TODO: Once we have call site specific value information we can provide
1834     //       call site specific liveness information and then it makes
1835     //       sense to specialize attributes for call sites arguments instead of
1836     //       redirecting requests to the callee argument.
1837     Function *F = getAssociatedFunction();
1838     const IRPosition &FnPos = IRPosition::function(*F);
1839     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1840     return clampStateAndIndicateChange(
1841         getState(),
1842         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1843   }
1844 
1845   /// See AbstractAttribute::trackStatistics()
1846   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1847 };
1848 
1849 /// -------------------- Undefined-Behavior Attributes ------------------------
1850 
1851 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1852   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1853       : AAUndefinedBehavior(IRP, A) {}
1854 
1855   /// See AbstractAttribute::updateImpl(...).
1856   // through a pointer (i.e. also branches etc.)
1857   ChangeStatus updateImpl(Attributor &A) override {
1858     const size_t UBPrevSize = KnownUBInsts.size();
1859     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1860 
1861     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1862       // Skip instructions that are already saved.
1863       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1864         return true;
1865 
1866       // If we reach here, we know we have an instruction
1867       // that accesses memory through a pointer operand,
1868       // for which getPointerOperand() should give it to us.
1869       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1870       assert(PtrOp &&
1871              "Expected pointer operand of memory accessing instruction");
1872 
1873       // Either we stopped and the appropriate action was taken,
1874       // or we got back a simplified value to continue.
1875       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1876       if (!SimplifiedPtrOp.hasValue())
1877         return true;
1878       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1879 
1880       // A memory access through a pointer is considered UB
1881       // only if the pointer has constant null value.
1882       // TODO: Expand it to not only check constant values.
1883       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1884         AssumedNoUBInsts.insert(&I);
1885         return true;
1886       }
1887       const Type *PtrTy = PtrOpVal->getType();
1888 
1889       // Because we only consider instructions inside functions,
1890       // assume that a parent function exists.
1891       const Function *F = I.getFunction();
1892 
1893       // A memory access using constant null pointer is only considered UB
1894       // if null pointer is _not_ defined for the target platform.
1895       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1896         AssumedNoUBInsts.insert(&I);
1897       else
1898         KnownUBInsts.insert(&I);
1899       return true;
1900     };
1901 
1902     auto InspectBrInstForUB = [&](Instruction &I) {
1903       // A conditional branch instruction is considered UB if it has `undef`
1904       // condition.
1905 
1906       // Skip instructions that are already saved.
1907       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1908         return true;
1909 
1910       // We know we have a branch instruction.
1911       auto BrInst = cast<BranchInst>(&I);
1912 
1913       // Unconditional branches are never considered UB.
1914       if (BrInst->isUnconditional())
1915         return true;
1916 
1917       // Either we stopped and the appropriate action was taken,
1918       // or we got back a simplified value to continue.
1919       Optional<Value *> SimplifiedCond =
1920           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1921       if (!SimplifiedCond.hasValue())
1922         return true;
1923       AssumedNoUBInsts.insert(&I);
1924       return true;
1925     };
1926 
1927     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
1928                               {Instruction::Load, Instruction::Store,
1929                                Instruction::AtomicCmpXchg,
1930                                Instruction::AtomicRMW},
1931                               /* CheckBBLivenessOnly */ true);
1932     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
1933                               /* CheckBBLivenessOnly */ true);
1934     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
1935         UBPrevSize != KnownUBInsts.size())
1936       return ChangeStatus::CHANGED;
1937     return ChangeStatus::UNCHANGED;
1938   }
1939 
1940   bool isKnownToCauseUB(Instruction *I) const override {
1941     return KnownUBInsts.count(I);
1942   }
1943 
1944   bool isAssumedToCauseUB(Instruction *I) const override {
1945     // In simple words, if an instruction is not in the assumed to _not_
1946     // cause UB, then it is assumed UB (that includes those
1947     // in the KnownUBInsts set). The rest is boilerplate
1948     // is to ensure that it is one of the instructions we test
1949     // for UB.
1950 
1951     switch (I->getOpcode()) {
1952     case Instruction::Load:
1953     case Instruction::Store:
1954     case Instruction::AtomicCmpXchg:
1955     case Instruction::AtomicRMW:
1956       return !AssumedNoUBInsts.count(I);
1957     case Instruction::Br: {
1958       auto BrInst = cast<BranchInst>(I);
1959       if (BrInst->isUnconditional())
1960         return false;
1961       return !AssumedNoUBInsts.count(I);
1962     } break;
1963     default:
1964       return false;
1965     }
1966     return false;
1967   }
1968 
1969   ChangeStatus manifest(Attributor &A) override {
1970     if (KnownUBInsts.empty())
1971       return ChangeStatus::UNCHANGED;
1972     for (Instruction *I : KnownUBInsts)
1973       A.changeToUnreachableAfterManifest(I);
1974     return ChangeStatus::CHANGED;
1975   }
1976 
1977   /// See AbstractAttribute::getAsStr()
1978   const std::string getAsStr() const override {
1979     return getAssumed() ? "undefined-behavior" : "no-ub";
1980   }
1981 
1982   /// Note: The correctness of this analysis depends on the fact that the
1983   /// following 2 sets will stop changing after some point.
1984   /// "Change" here means that their size changes.
1985   /// The size of each set is monotonically increasing
1986   /// (we only add items to them) and it is upper bounded by the number of
1987   /// instructions in the processed function (we can never save more
1988   /// elements in either set than this number). Hence, at some point,
1989   /// they will stop increasing.
1990   /// Consequently, at some point, both sets will have stopped
1991   /// changing, effectively making the analysis reach a fixpoint.
1992 
1993   /// Note: These 2 sets are disjoint and an instruction can be considered
1994   /// one of 3 things:
1995   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
1996   ///    the KnownUBInsts set.
1997   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
1998   ///    has a reason to assume it).
1999   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2000   ///    could not find a reason to assume or prove that it can cause UB,
2001   ///    hence it assumes it doesn't. We have a set for these instructions
2002   ///    so that we don't reprocess them in every update.
2003   ///    Note however that instructions in this set may cause UB.
2004 
2005 protected:
2006   /// A set of all live instructions _known_ to cause UB.
2007   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2008 
2009 private:
2010   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2011   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2012 
2013   // Should be called on updates in which if we're processing an instruction
2014   // \p I that depends on a value \p V, one of the following has to happen:
2015   // - If the value is assumed, then stop.
2016   // - If the value is known but undef, then consider it UB.
2017   // - Otherwise, do specific processing with the simplified value.
2018   // We return None in the first 2 cases to signify that an appropriate
2019   // action was taken and the caller should stop.
2020   // Otherwise, we return the simplified value that the caller should
2021   // use for specific processing.
2022   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2023                                          Instruction *I) {
2024     const auto &ValueSimplifyAA =
2025         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2026     Optional<Value *> SimplifiedV =
2027         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2028     if (!ValueSimplifyAA.isKnown()) {
2029       // Don't depend on assumed values.
2030       return llvm::None;
2031     }
2032     if (!SimplifiedV.hasValue()) {
2033       // If it is known (which we tested above) but it doesn't have a value,
2034       // then we can assume `undef` and hence the instruction is UB.
2035       KnownUBInsts.insert(I);
2036       return llvm::None;
2037     }
2038     Value *Val = SimplifiedV.getValue();
2039     if (isa<UndefValue>(Val)) {
2040       KnownUBInsts.insert(I);
2041       return llvm::None;
2042     }
2043     return Val;
2044   }
2045 };
2046 
2047 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2048   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2049       : AAUndefinedBehaviorImpl(IRP, A) {}
2050 
2051   /// See AbstractAttribute::trackStatistics()
2052   void trackStatistics() const override {
2053     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2054                "Number of instructions known to have UB");
2055     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2056         KnownUBInsts.size();
2057   }
2058 };
2059 
2060 /// ------------------------ Will-Return Attributes ----------------------------
2061 
2062 // Helper function that checks whether a function has any cycle which we don't
2063 // know if it is bounded or not.
2064 // Loops with maximum trip count are considered bounded, any other cycle not.
2065 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2066   ScalarEvolution *SE =
2067       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2068   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2069   // If either SCEV or LoopInfo is not available for the function then we assume
2070   // any cycle to be unbounded cycle.
2071   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2072   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2073   if (!SE || !LI) {
2074     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2075       if (SCCI.hasCycle())
2076         return true;
2077     return false;
2078   }
2079 
2080   // If there's irreducible control, the function may contain non-loop cycles.
2081   if (mayContainIrreducibleControl(F, LI))
2082     return true;
2083 
2084   // Any loop that does not have a max trip count is considered unbounded cycle.
2085   for (auto *L : LI->getLoopsInPreorder()) {
2086     if (!SE->getSmallConstantMaxTripCount(L))
2087       return true;
2088   }
2089   return false;
2090 }
2091 
2092 struct AAWillReturnImpl : public AAWillReturn {
2093   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2094       : AAWillReturn(IRP, A) {}
2095 
2096   /// See AbstractAttribute::initialize(...).
2097   void initialize(Attributor &A) override {
2098     AAWillReturn::initialize(A);
2099 
2100     Function *F = getAnchorScope();
2101     if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
2102       indicatePessimisticFixpoint();
2103   }
2104 
2105   /// See AbstractAttribute::updateImpl(...).
2106   ChangeStatus updateImpl(Attributor &A) override {
2107     auto CheckForWillReturn = [&](Instruction &I) {
2108       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2109       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2110       if (WillReturnAA.isKnownWillReturn())
2111         return true;
2112       if (!WillReturnAA.isAssumedWillReturn())
2113         return false;
2114       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2115       return NoRecurseAA.isAssumedNoRecurse();
2116     };
2117 
2118     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2119       return indicatePessimisticFixpoint();
2120 
2121     return ChangeStatus::UNCHANGED;
2122   }
2123 
2124   /// See AbstractAttribute::getAsStr()
2125   const std::string getAsStr() const override {
2126     return getAssumed() ? "willreturn" : "may-noreturn";
2127   }
2128 };
2129 
2130 struct AAWillReturnFunction final : AAWillReturnImpl {
2131   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2132       : AAWillReturnImpl(IRP, A) {}
2133 
2134   /// See AbstractAttribute::trackStatistics()
2135   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2136 };
2137 
2138 /// WillReturn attribute deduction for a call sites.
2139 struct AAWillReturnCallSite final : AAWillReturnImpl {
2140   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2141       : AAWillReturnImpl(IRP, A) {}
2142 
2143   /// See AbstractAttribute::initialize(...).
2144   void initialize(Attributor &A) override {
2145     AAWillReturnImpl::initialize(A);
2146     Function *F = getAssociatedFunction();
2147     if (!F)
2148       indicatePessimisticFixpoint();
2149   }
2150 
2151   /// See AbstractAttribute::updateImpl(...).
2152   ChangeStatus updateImpl(Attributor &A) override {
2153     // TODO: Once we have call site specific value information we can provide
2154     //       call site specific liveness information and then it makes
2155     //       sense to specialize attributes for call sites arguments instead of
2156     //       redirecting requests to the callee argument.
2157     Function *F = getAssociatedFunction();
2158     const IRPosition &FnPos = IRPosition::function(*F);
2159     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2160     return clampStateAndIndicateChange(
2161         getState(),
2162         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2163   }
2164 
2165   /// See AbstractAttribute::trackStatistics()
2166   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2167 };
2168 
2169 /// -------------------AAReachability Attribute--------------------------
2170 
2171 struct AAReachabilityImpl : AAReachability {
2172   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2173       : AAReachability(IRP, A) {}
2174 
2175   const std::string getAsStr() const override {
2176     // TODO: Return the number of reachable queries.
2177     return "reachable";
2178   }
2179 
2180   /// See AbstractAttribute::initialize(...).
2181   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2182 
2183   /// See AbstractAttribute::updateImpl(...).
2184   ChangeStatus updateImpl(Attributor &A) override {
2185     return indicatePessimisticFixpoint();
2186   }
2187 };
2188 
2189 struct AAReachabilityFunction final : public AAReachabilityImpl {
2190   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2191       : AAReachabilityImpl(IRP, A) {}
2192 
2193   /// See AbstractAttribute::trackStatistics()
2194   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2195 };
2196 
2197 /// ------------------------ NoAlias Argument Attribute ------------------------
2198 
2199 struct AANoAliasImpl : AANoAlias {
2200   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2201     assert(getAssociatedType()->isPointerTy() &&
2202            "Noalias is a pointer attribute");
2203   }
2204 
2205   const std::string getAsStr() const override {
2206     return getAssumed() ? "noalias" : "may-alias";
2207   }
2208 };
2209 
2210 /// NoAlias attribute for a floating value.
2211 struct AANoAliasFloating final : AANoAliasImpl {
2212   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2213       : AANoAliasImpl(IRP, A) {}
2214 
2215   /// See AbstractAttribute::initialize(...).
2216   void initialize(Attributor &A) override {
2217     AANoAliasImpl::initialize(A);
2218     Value *Val = &getAssociatedValue();
2219     do {
2220       CastInst *CI = dyn_cast<CastInst>(Val);
2221       if (!CI)
2222         break;
2223       Value *Base = CI->getOperand(0);
2224       if (!Base->hasOneUse())
2225         break;
2226       Val = Base;
2227     } while (true);
2228 
2229     if (!Val->getType()->isPointerTy()) {
2230       indicatePessimisticFixpoint();
2231       return;
2232     }
2233 
2234     if (isa<AllocaInst>(Val))
2235       indicateOptimisticFixpoint();
2236     else if (isa<ConstantPointerNull>(Val) &&
2237              !NullPointerIsDefined(getAnchorScope(),
2238                                    Val->getType()->getPointerAddressSpace()))
2239       indicateOptimisticFixpoint();
2240     else if (Val != &getAssociatedValue()) {
2241       const auto &ValNoAliasAA =
2242           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2243       if (ValNoAliasAA.isKnownNoAlias())
2244         indicateOptimisticFixpoint();
2245     }
2246   }
2247 
2248   /// See AbstractAttribute::updateImpl(...).
2249   ChangeStatus updateImpl(Attributor &A) override {
2250     // TODO: Implement this.
2251     return indicatePessimisticFixpoint();
2252   }
2253 
2254   /// See AbstractAttribute::trackStatistics()
2255   void trackStatistics() const override {
2256     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2257   }
2258 };
2259 
2260 /// NoAlias attribute for an argument.
2261 struct AANoAliasArgument final
2262     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2263   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2264   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2265 
2266   /// See AbstractAttribute::initialize(...).
2267   void initialize(Attributor &A) override {
2268     Base::initialize(A);
2269     // See callsite argument attribute and callee argument attribute.
2270     if (hasAttr({Attribute::ByVal}))
2271       indicateOptimisticFixpoint();
2272   }
2273 
2274   /// See AbstractAttribute::update(...).
2275   ChangeStatus updateImpl(Attributor &A) override {
2276     // We have to make sure no-alias on the argument does not break
2277     // synchronization when this is a callback argument, see also [1] below.
2278     // If synchronization cannot be affected, we delegate to the base updateImpl
2279     // function, otherwise we give up for now.
2280 
2281     // If the function is no-sync, no-alias cannot break synchronization.
2282     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2283         *this, IRPosition::function_scope(getIRPosition()));
2284     if (NoSyncAA.isAssumedNoSync())
2285       return Base::updateImpl(A);
2286 
2287     // If the argument is read-only, no-alias cannot break synchronization.
2288     const auto &MemBehaviorAA =
2289         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2290     if (MemBehaviorAA.isAssumedReadOnly())
2291       return Base::updateImpl(A);
2292 
2293     // If the argument is never passed through callbacks, no-alias cannot break
2294     // synchronization.
2295     bool AllCallSitesKnown;
2296     if (A.checkForAllCallSites(
2297             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2298             true, AllCallSitesKnown))
2299       return Base::updateImpl(A);
2300 
2301     // TODO: add no-alias but make sure it doesn't break synchronization by
2302     // introducing fake uses. See:
2303     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2304     //     International Workshop on OpenMP 2018,
2305     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2306 
2307     return indicatePessimisticFixpoint();
2308   }
2309 
2310   /// See AbstractAttribute::trackStatistics()
2311   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2312 };
2313 
2314 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2315   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2316       : AANoAliasImpl(IRP, A) {}
2317 
2318   /// See AbstractAttribute::initialize(...).
2319   void initialize(Attributor &A) override {
2320     // See callsite argument attribute and callee argument attribute.
2321     const auto &CB = cast<CallBase>(getAnchorValue());
2322     if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias))
2323       indicateOptimisticFixpoint();
2324     Value &Val = getAssociatedValue();
2325     if (isa<ConstantPointerNull>(Val) &&
2326         !NullPointerIsDefined(getAnchorScope(),
2327                               Val.getType()->getPointerAddressSpace()))
2328       indicateOptimisticFixpoint();
2329   }
2330 
2331   /// Determine if the underlying value may alias with the call site argument
2332   /// \p OtherArgNo of \p ICS (= the underlying call site).
2333   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2334                             const AAMemoryBehavior &MemBehaviorAA,
2335                             const CallBase &CB, unsigned OtherArgNo) {
2336     // We do not need to worry about aliasing with the underlying IRP.
2337     if (this->getArgNo() == (int)OtherArgNo)
2338       return false;
2339 
2340     // If it is not a pointer or pointer vector we do not alias.
2341     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2342     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2343       return false;
2344 
2345     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2346         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2347         /* TrackDependence */ false);
2348 
2349     // If the argument is readnone, there is no read-write aliasing.
2350     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2351       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2352       return false;
2353     }
2354 
2355     // If the argument is readonly and the underlying value is readonly, there
2356     // is no read-write aliasing.
2357     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2358     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2359       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2360       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2361       return false;
2362     }
2363 
2364     // We have to utilize actual alias analysis queries so we need the object.
2365     if (!AAR)
2366       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2367 
2368     // Try to rule it out at the call site.
2369     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2370     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2371                          "callsite arguments: "
2372                       << getAssociatedValue() << " " << *ArgOp << " => "
2373                       << (IsAliasing ? "" : "no-") << "alias \n");
2374 
2375     return IsAliasing;
2376   }
2377 
2378   bool
2379   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2380                                          const AAMemoryBehavior &MemBehaviorAA,
2381                                          const AANoAlias &NoAliasAA) {
2382     // We can deduce "noalias" if the following conditions hold.
2383     // (i)   Associated value is assumed to be noalias in the definition.
2384     // (ii)  Associated value is assumed to be no-capture in all the uses
2385     //       possibly executed before this callsite.
2386     // (iii) There is no other pointer argument which could alias with the
2387     //       value.
2388 
2389     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2390     if (!AssociatedValueIsNoAliasAtDef) {
2391       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2392                         << " is not no-alias at the definition\n");
2393       return false;
2394     }
2395 
2396     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2397 
2398     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2399     auto &NoCaptureAA =
2400         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2401     // Check whether the value is captured in the scope using AANoCapture.
2402     //      Look at CFG and check only uses possibly executed before this
2403     //      callsite.
2404     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2405       Instruction *UserI = cast<Instruction>(U.getUser());
2406 
2407       // If user if curr instr and only use.
2408       if (UserI == getCtxI() && UserI->hasOneUse())
2409         return true;
2410 
2411       const Function *ScopeFn = VIRP.getAnchorScope();
2412       if (ScopeFn) {
2413         const auto &ReachabilityAA =
2414             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2415 
2416         if (!ReachabilityAA.isAssumedReachable(UserI, getCtxI()))
2417           return true;
2418 
2419         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2420           if (CB->isArgOperand(&U)) {
2421 
2422             unsigned ArgNo = CB->getArgOperandNo(&U);
2423 
2424             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2425                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2426 
2427             if (NoCaptureAA.isAssumedNoCapture())
2428               return true;
2429           }
2430         }
2431       }
2432 
2433       // For cases which can potentially have more users
2434       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2435           isa<SelectInst>(U)) {
2436         Follow = true;
2437         return true;
2438       }
2439 
2440       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2441       return false;
2442     };
2443 
2444     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2445       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2446         LLVM_DEBUG(
2447             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2448                    << " cannot be noalias as it is potentially captured\n");
2449         return false;
2450       }
2451     }
2452     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2453 
2454     // Check there is no other pointer argument which could alias with the
2455     // value passed at this call site.
2456     // TODO: AbstractCallSite
2457     const auto &CB = cast<CallBase>(getAnchorValue());
2458     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2459          OtherArgNo++)
2460       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2461         return false;
2462 
2463     return true;
2464   }
2465 
2466   /// See AbstractAttribute::updateImpl(...).
2467   ChangeStatus updateImpl(Attributor &A) override {
2468     // If the argument is readnone we are done as there are no accesses via the
2469     // argument.
2470     auto &MemBehaviorAA =
2471         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2472                                      /* TrackDependence */ false);
2473     if (MemBehaviorAA.isAssumedReadNone()) {
2474       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2475       return ChangeStatus::UNCHANGED;
2476     }
2477 
2478     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2479     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2480                                                   /* TrackDependence */ false);
2481 
2482     AAResults *AAR = nullptr;
2483     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2484                                                NoAliasAA)) {
2485       LLVM_DEBUG(
2486           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2487       return ChangeStatus::UNCHANGED;
2488     }
2489 
2490     return indicatePessimisticFixpoint();
2491   }
2492 
2493   /// See AbstractAttribute::trackStatistics()
2494   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2495 };
2496 
2497 /// NoAlias attribute for function return value.
2498 struct AANoAliasReturned final : AANoAliasImpl {
2499   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2500       : AANoAliasImpl(IRP, A) {}
2501 
2502   /// See AbstractAttribute::updateImpl(...).
2503   virtual ChangeStatus updateImpl(Attributor &A) override {
2504 
2505     auto CheckReturnValue = [&](Value &RV) -> bool {
2506       if (Constant *C = dyn_cast<Constant>(&RV))
2507         if (C->isNullValue() || isa<UndefValue>(C))
2508           return true;
2509 
2510       /// For now, we can only deduce noalias if we have call sites.
2511       /// FIXME: add more support.
2512       if (!isa<CallBase>(&RV))
2513         return false;
2514 
2515       const IRPosition &RVPos = IRPosition::value(RV);
2516       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2517       if (!NoAliasAA.isAssumedNoAlias())
2518         return false;
2519 
2520       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2521       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2522     };
2523 
2524     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2525       return indicatePessimisticFixpoint();
2526 
2527     return ChangeStatus::UNCHANGED;
2528   }
2529 
2530   /// See AbstractAttribute::trackStatistics()
2531   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2532 };
2533 
2534 /// NoAlias attribute deduction for a call site return value.
2535 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2536   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2537       : AANoAliasImpl(IRP, A) {}
2538 
2539   /// See AbstractAttribute::initialize(...).
2540   void initialize(Attributor &A) override {
2541     AANoAliasImpl::initialize(A);
2542     Function *F = getAssociatedFunction();
2543     if (!F)
2544       indicatePessimisticFixpoint();
2545   }
2546 
2547   /// See AbstractAttribute::updateImpl(...).
2548   ChangeStatus updateImpl(Attributor &A) override {
2549     // TODO: Once we have call site specific value information we can provide
2550     //       call site specific liveness information and then it makes
2551     //       sense to specialize attributes for call sites arguments instead of
2552     //       redirecting requests to the callee argument.
2553     Function *F = getAssociatedFunction();
2554     const IRPosition &FnPos = IRPosition::returned(*F);
2555     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2556     return clampStateAndIndicateChange(
2557         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2558   }
2559 
2560   /// See AbstractAttribute::trackStatistics()
2561   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2562 };
2563 
2564 /// -------------------AAIsDead Function Attribute-----------------------
2565 
2566 struct AAIsDeadValueImpl : public AAIsDead {
2567   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2568 
2569   /// See AAIsDead::isAssumedDead().
2570   bool isAssumedDead() const override { return getAssumed(); }
2571 
2572   /// See AAIsDead::isKnownDead().
2573   bool isKnownDead() const override { return getKnown(); }
2574 
2575   /// See AAIsDead::isAssumedDead(BasicBlock *).
2576   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2577 
2578   /// See AAIsDead::isKnownDead(BasicBlock *).
2579   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2580 
2581   /// See AAIsDead::isAssumedDead(Instruction *I).
2582   bool isAssumedDead(const Instruction *I) const override {
2583     return I == getCtxI() && isAssumedDead();
2584   }
2585 
2586   /// See AAIsDead::isKnownDead(Instruction *I).
2587   bool isKnownDead(const Instruction *I) const override {
2588     return isAssumedDead(I) && getKnown();
2589   }
2590 
2591   /// See AbstractAttribute::getAsStr().
2592   const std::string getAsStr() const override {
2593     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2594   }
2595 
2596   /// Check if all uses are assumed dead.
2597   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2598     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2599     // Explicitly set the dependence class to required because we want a long
2600     // chain of N dependent instructions to be considered live as soon as one is
2601     // without going through N update cycles. This is not required for
2602     // correctness.
2603     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2604   }
2605 
2606   /// Determine if \p I is assumed to be side-effect free.
2607   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2608     if (!I || wouldInstructionBeTriviallyDead(I))
2609       return true;
2610 
2611     auto *CB = dyn_cast<CallBase>(I);
2612     if (!CB || isa<IntrinsicInst>(CB))
2613       return false;
2614 
2615     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2616     const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>(
2617         *this, CallIRP, /* TrackDependence */ false);
2618     if (!NoUnwindAA.isAssumedNoUnwind())
2619       return false;
2620     if (!NoUnwindAA.isKnownNoUnwind())
2621       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2622 
2623     const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>(
2624         *this, CallIRP, /* TrackDependence */ false);
2625     if (MemBehaviorAA.isAssumedReadOnly()) {
2626       if (!MemBehaviorAA.isKnownReadOnly())
2627         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2628       return true;
2629     }
2630     return false;
2631   }
2632 };
2633 
2634 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2635   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2636       : AAIsDeadValueImpl(IRP, A) {}
2637 
2638   /// See AbstractAttribute::initialize(...).
2639   void initialize(Attributor &A) override {
2640     if (isa<UndefValue>(getAssociatedValue())) {
2641       indicatePessimisticFixpoint();
2642       return;
2643     }
2644 
2645     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2646     if (!isAssumedSideEffectFree(A, I))
2647       indicatePessimisticFixpoint();
2648   }
2649 
2650   /// See AbstractAttribute::updateImpl(...).
2651   ChangeStatus updateImpl(Attributor &A) override {
2652     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2653     if (!isAssumedSideEffectFree(A, I))
2654       return indicatePessimisticFixpoint();
2655 
2656     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2657       return indicatePessimisticFixpoint();
2658     return ChangeStatus::UNCHANGED;
2659   }
2660 
2661   /// See AbstractAttribute::manifest(...).
2662   ChangeStatus manifest(Attributor &A) override {
2663     Value &V = getAssociatedValue();
2664     if (auto *I = dyn_cast<Instruction>(&V)) {
2665       // If we get here we basically know the users are all dead. We check if
2666       // isAssumedSideEffectFree returns true here again because it might not be
2667       // the case and only the users are dead but the instruction (=call) is
2668       // still needed.
2669       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2670         A.deleteAfterManifest(*I);
2671         return ChangeStatus::CHANGED;
2672       }
2673     }
2674     if (V.use_empty())
2675       return ChangeStatus::UNCHANGED;
2676 
2677     bool UsedAssumedInformation = false;
2678     Optional<Constant *> C =
2679         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2680     if (C.hasValue() && C.getValue())
2681       return ChangeStatus::UNCHANGED;
2682 
2683     // Replace the value with undef as it is dead but keep droppable uses around
2684     // as they provide information we don't want to give up on just yet.
2685     UndefValue &UV = *UndefValue::get(V.getType());
2686     bool AnyChange =
2687         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2688     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2689   }
2690 
2691   /// See AbstractAttribute::trackStatistics()
2692   void trackStatistics() const override {
2693     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2694   }
2695 };
2696 
2697 struct AAIsDeadArgument : public AAIsDeadFloating {
2698   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2699       : AAIsDeadFloating(IRP, A) {}
2700 
2701   /// See AbstractAttribute::initialize(...).
2702   void initialize(Attributor &A) override {
2703     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2704       indicatePessimisticFixpoint();
2705   }
2706 
2707   /// See AbstractAttribute::manifest(...).
2708   ChangeStatus manifest(Attributor &A) override {
2709     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2710     Argument &Arg = *getAssociatedArgument();
2711     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2712       if (A.registerFunctionSignatureRewrite(
2713               Arg, /* ReplacementTypes */ {},
2714               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2715               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2716         Arg.dropDroppableUses();
2717         return ChangeStatus::CHANGED;
2718       }
2719     return Changed;
2720   }
2721 
2722   /// See AbstractAttribute::trackStatistics()
2723   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2724 };
2725 
2726 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2727   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2728       : AAIsDeadValueImpl(IRP, A) {}
2729 
2730   /// See AbstractAttribute::initialize(...).
2731   void initialize(Attributor &A) override {
2732     if (isa<UndefValue>(getAssociatedValue()))
2733       indicatePessimisticFixpoint();
2734   }
2735 
2736   /// See AbstractAttribute::updateImpl(...).
2737   ChangeStatus updateImpl(Attributor &A) override {
2738     // TODO: Once we have call site specific value information we can provide
2739     //       call site specific liveness information and then it makes
2740     //       sense to specialize attributes for call sites arguments instead of
2741     //       redirecting requests to the callee argument.
2742     Argument *Arg = getAssociatedArgument();
2743     if (!Arg)
2744       return indicatePessimisticFixpoint();
2745     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2746     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2747     return clampStateAndIndicateChange(
2748         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2749   }
2750 
2751   /// See AbstractAttribute::manifest(...).
2752   ChangeStatus manifest(Attributor &A) override {
2753     CallBase &CB = cast<CallBase>(getAnchorValue());
2754     Use &U = CB.getArgOperandUse(getArgNo());
2755     assert(!isa<UndefValue>(U.get()) &&
2756            "Expected undef values to be filtered out!");
2757     UndefValue &UV = *UndefValue::get(U->getType());
2758     if (A.changeUseAfterManifest(U, UV))
2759       return ChangeStatus::CHANGED;
2760     return ChangeStatus::UNCHANGED;
2761   }
2762 
2763   /// See AbstractAttribute::trackStatistics()
2764   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2765 };
2766 
2767 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2768   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2769       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2770 
2771   /// See AAIsDead::isAssumedDead().
2772   bool isAssumedDead() const override {
2773     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2774   }
2775 
2776   /// See AbstractAttribute::initialize(...).
2777   void initialize(Attributor &A) override {
2778     if (isa<UndefValue>(getAssociatedValue())) {
2779       indicatePessimisticFixpoint();
2780       return;
2781     }
2782 
2783     // We track this separately as a secondary state.
2784     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2785   }
2786 
2787   /// See AbstractAttribute::updateImpl(...).
2788   ChangeStatus updateImpl(Attributor &A) override {
2789     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2790     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2791       IsAssumedSideEffectFree = false;
2792       Changed = ChangeStatus::CHANGED;
2793     }
2794 
2795     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2796       return indicatePessimisticFixpoint();
2797     return Changed;
2798   }
2799 
2800   /// See AbstractAttribute::trackStatistics()
2801   void trackStatistics() const override {
2802     if (IsAssumedSideEffectFree)
2803       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2804     else
2805       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2806   }
2807 
2808   /// See AbstractAttribute::getAsStr().
2809   const std::string getAsStr() const override {
2810     return isAssumedDead()
2811                ? "assumed-dead"
2812                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2813   }
2814 
2815 private:
2816   bool IsAssumedSideEffectFree;
2817 };
2818 
2819 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2820   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
2821       : AAIsDeadValueImpl(IRP, A) {}
2822 
2823   /// See AbstractAttribute::updateImpl(...).
2824   ChangeStatus updateImpl(Attributor &A) override {
2825 
2826     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
2827                               {Instruction::Ret});
2828 
2829     auto PredForCallSite = [&](AbstractCallSite ACS) {
2830       if (ACS.isCallbackCall() || !ACS.getInstruction())
2831         return false;
2832       return areAllUsesAssumedDead(A, *ACS.getInstruction());
2833     };
2834 
2835     bool AllCallSitesKnown;
2836     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
2837                                 AllCallSitesKnown))
2838       return indicatePessimisticFixpoint();
2839 
2840     return ChangeStatus::UNCHANGED;
2841   }
2842 
2843   /// See AbstractAttribute::manifest(...).
2844   ChangeStatus manifest(Attributor &A) override {
2845     // TODO: Rewrite the signature to return void?
2846     bool AnyChange = false;
2847     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
2848     auto RetInstPred = [&](Instruction &I) {
2849       ReturnInst &RI = cast<ReturnInst>(I);
2850       if (!isa<UndefValue>(RI.getReturnValue()))
2851         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
2852       return true;
2853     };
2854     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
2855     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2856   }
2857 
2858   /// See AbstractAttribute::trackStatistics()
2859   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
2860 };
2861 
2862 struct AAIsDeadFunction : public AAIsDead {
2863   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2864 
2865   /// See AbstractAttribute::initialize(...).
2866   void initialize(Attributor &A) override {
2867     const Function *F = getAnchorScope();
2868     if (F && !F->isDeclaration()) {
2869       ToBeExploredFrom.insert(&F->getEntryBlock().front());
2870       assumeLive(A, F->getEntryBlock());
2871     }
2872   }
2873 
2874   /// See AbstractAttribute::getAsStr().
2875   const std::string getAsStr() const override {
2876     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
2877            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
2878            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
2879            std::to_string(KnownDeadEnds.size()) + "]";
2880   }
2881 
2882   /// See AbstractAttribute::manifest(...).
2883   ChangeStatus manifest(Attributor &A) override {
2884     assert(getState().isValidState() &&
2885            "Attempted to manifest an invalid state!");
2886 
2887     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
2888     Function &F = *getAnchorScope();
2889 
2890     if (AssumedLiveBlocks.empty()) {
2891       A.deleteAfterManifest(F);
2892       return ChangeStatus::CHANGED;
2893     }
2894 
2895     // Flag to determine if we can change an invoke to a call assuming the
2896     // callee is nounwind. This is not possible if the personality of the
2897     // function allows to catch asynchronous exceptions.
2898     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
2899 
2900     KnownDeadEnds.set_union(ToBeExploredFrom);
2901     for (const Instruction *DeadEndI : KnownDeadEnds) {
2902       auto *CB = dyn_cast<CallBase>(DeadEndI);
2903       if (!CB)
2904         continue;
2905       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
2906           *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true,
2907           DepClassTy::OPTIONAL);
2908       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
2909       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
2910         continue;
2911 
2912       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
2913         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
2914       else
2915         A.changeToUnreachableAfterManifest(
2916             const_cast<Instruction *>(DeadEndI->getNextNode()));
2917       HasChanged = ChangeStatus::CHANGED;
2918     }
2919 
2920     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
2921     for (BasicBlock &BB : F)
2922       if (!AssumedLiveBlocks.count(&BB)) {
2923         A.deleteAfterManifest(BB);
2924         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
2925       }
2926 
2927     return HasChanged;
2928   }
2929 
2930   /// See AbstractAttribute::updateImpl(...).
2931   ChangeStatus updateImpl(Attributor &A) override;
2932 
2933   /// See AbstractAttribute::trackStatistics()
2934   void trackStatistics() const override {}
2935 
2936   /// Returns true if the function is assumed dead.
2937   bool isAssumedDead() const override { return false; }
2938 
2939   /// See AAIsDead::isKnownDead().
2940   bool isKnownDead() const override { return false; }
2941 
2942   /// See AAIsDead::isAssumedDead(BasicBlock *).
2943   bool isAssumedDead(const BasicBlock *BB) const override {
2944     assert(BB->getParent() == getAnchorScope() &&
2945            "BB must be in the same anchor scope function.");
2946 
2947     if (!getAssumed())
2948       return false;
2949     return !AssumedLiveBlocks.count(BB);
2950   }
2951 
2952   /// See AAIsDead::isKnownDead(BasicBlock *).
2953   bool isKnownDead(const BasicBlock *BB) const override {
2954     return getKnown() && isAssumedDead(BB);
2955   }
2956 
2957   /// See AAIsDead::isAssumed(Instruction *I).
2958   bool isAssumedDead(const Instruction *I) const override {
2959     assert(I->getParent()->getParent() == getAnchorScope() &&
2960            "Instruction must be in the same anchor scope function.");
2961 
2962     if (!getAssumed())
2963       return false;
2964 
2965     // If it is not in AssumedLiveBlocks then it for sure dead.
2966     // Otherwise, it can still be after noreturn call in a live block.
2967     if (!AssumedLiveBlocks.count(I->getParent()))
2968       return true;
2969 
2970     // If it is not after a liveness barrier it is live.
2971     const Instruction *PrevI = I->getPrevNode();
2972     while (PrevI) {
2973       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
2974         return true;
2975       PrevI = PrevI->getPrevNode();
2976     }
2977     return false;
2978   }
2979 
2980   /// See AAIsDead::isKnownDead(Instruction *I).
2981   bool isKnownDead(const Instruction *I) const override {
2982     return getKnown() && isAssumedDead(I);
2983   }
2984 
2985   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
2986   /// that internal function called from \p BB should now be looked at.
2987   bool assumeLive(Attributor &A, const BasicBlock &BB) {
2988     if (!AssumedLiveBlocks.insert(&BB).second)
2989       return false;
2990 
2991     // We assume that all of BB is (probably) live now and if there are calls to
2992     // internal functions we will assume that those are now live as well. This
2993     // is a performance optimization for blocks with calls to a lot of internal
2994     // functions. It can however cause dead functions to be treated as live.
2995     for (const Instruction &I : BB)
2996       if (const auto *CB = dyn_cast<CallBase>(&I))
2997         if (const Function *F = CB->getCalledFunction())
2998           if (F->hasLocalLinkage())
2999             A.markLiveInternalFunction(*F);
3000     return true;
3001   }
3002 
3003   /// Collection of instructions that need to be explored again, e.g., we
3004   /// did assume they do not transfer control to (one of their) successors.
3005   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3006 
3007   /// Collection of instructions that are known to not transfer control.
3008   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3009 
3010   /// Collection of all assumed live BasicBlocks.
3011   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3012 };
3013 
3014 static bool
3015 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3016                         AbstractAttribute &AA,
3017                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3018   const IRPosition &IPos = IRPosition::callsite_function(CB);
3019 
3020   const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3021       AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3022   if (NoReturnAA.isAssumedNoReturn())
3023     return !NoReturnAA.isKnownNoReturn();
3024   if (CB.isTerminator())
3025     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3026   else
3027     AliveSuccessors.push_back(CB.getNextNode());
3028   return false;
3029 }
3030 
3031 static bool
3032 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3033                         AbstractAttribute &AA,
3034                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3035   bool UsedAssumedInformation =
3036       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3037 
3038   // First, determine if we can change an invoke to a call assuming the
3039   // callee is nounwind. This is not possible if the personality of the
3040   // function allows to catch asynchronous exceptions.
3041   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3042     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3043   } else {
3044     const IRPosition &IPos = IRPosition::callsite_function(II);
3045     const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>(
3046         AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3047     if (AANoUnw.isAssumedNoUnwind()) {
3048       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3049     } else {
3050       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3051     }
3052   }
3053   return UsedAssumedInformation;
3054 }
3055 
3056 static bool
3057 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3058                         AbstractAttribute &AA,
3059                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3060   bool UsedAssumedInformation = false;
3061   if (BI.getNumSuccessors() == 1) {
3062     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3063   } else {
3064     Optional<ConstantInt *> CI = getAssumedConstantInt(
3065         A, *BI.getCondition(), AA, UsedAssumedInformation);
3066     if (!CI.hasValue()) {
3067       // No value yet, assume both edges are dead.
3068     } else if (CI.getValue()) {
3069       const BasicBlock *SuccBB =
3070           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3071       AliveSuccessors.push_back(&SuccBB->front());
3072     } else {
3073       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3074       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3075       UsedAssumedInformation = false;
3076     }
3077   }
3078   return UsedAssumedInformation;
3079 }
3080 
3081 static bool
3082 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3083                         AbstractAttribute &AA,
3084                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3085   bool UsedAssumedInformation = false;
3086   Optional<ConstantInt *> CI =
3087       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3088   if (!CI.hasValue()) {
3089     // No value yet, assume all edges are dead.
3090   } else if (CI.getValue()) {
3091     for (auto &CaseIt : SI.cases()) {
3092       if (CaseIt.getCaseValue() == CI.getValue()) {
3093         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3094         return UsedAssumedInformation;
3095       }
3096     }
3097     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3098     return UsedAssumedInformation;
3099   } else {
3100     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3101       AliveSuccessors.push_back(&SuccBB->front());
3102   }
3103   return UsedAssumedInformation;
3104 }
3105 
3106 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3107   ChangeStatus Change = ChangeStatus::UNCHANGED;
3108 
3109   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3110                     << getAnchorScope()->size() << "] BBs and "
3111                     << ToBeExploredFrom.size() << " exploration points and "
3112                     << KnownDeadEnds.size() << " known dead ends\n");
3113 
3114   // Copy and clear the list of instructions we need to explore from. It is
3115   // refilled with instructions the next update has to look at.
3116   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3117                                                ToBeExploredFrom.end());
3118   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3119 
3120   SmallVector<const Instruction *, 8> AliveSuccessors;
3121   while (!Worklist.empty()) {
3122     const Instruction *I = Worklist.pop_back_val();
3123     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3124 
3125     AliveSuccessors.clear();
3126 
3127     bool UsedAssumedInformation = false;
3128     switch (I->getOpcode()) {
3129     // TODO: look for (assumed) UB to backwards propagate "deadness".
3130     default:
3131       if (I->isTerminator()) {
3132         for (const BasicBlock *SuccBB : successors(I->getParent()))
3133           AliveSuccessors.push_back(&SuccBB->front());
3134       } else {
3135         AliveSuccessors.push_back(I->getNextNode());
3136       }
3137       break;
3138     case Instruction::Call:
3139       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3140                                                        *this, AliveSuccessors);
3141       break;
3142     case Instruction::Invoke:
3143       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3144                                                        *this, AliveSuccessors);
3145       break;
3146     case Instruction::Br:
3147       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3148                                                        *this, AliveSuccessors);
3149       break;
3150     case Instruction::Switch:
3151       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3152                                                        *this, AliveSuccessors);
3153       break;
3154     }
3155 
3156     if (UsedAssumedInformation) {
3157       NewToBeExploredFrom.insert(I);
3158     } else {
3159       Change = ChangeStatus::CHANGED;
3160       if (AliveSuccessors.empty() ||
3161           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3162         KnownDeadEnds.insert(I);
3163     }
3164 
3165     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3166                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3167                       << UsedAssumedInformation << "\n");
3168 
3169     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3170       if (!I->isTerminator()) {
3171         assert(AliveSuccessors.size() == 1 &&
3172                "Non-terminator expected to have a single successor!");
3173         Worklist.push_back(AliveSuccessor);
3174       } else {
3175         if (assumeLive(A, *AliveSuccessor->getParent()))
3176           Worklist.push_back(AliveSuccessor);
3177       }
3178     }
3179   }
3180 
3181   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3182 
3183   // If we know everything is live there is no need to query for liveness.
3184   // Instead, indicating a pessimistic fixpoint will cause the state to be
3185   // "invalid" and all queries to be answered conservatively without lookups.
3186   // To be in this state we have to (1) finished the exploration and (3) not
3187   // discovered any non-trivial dead end and (2) not ruled unreachable code
3188   // dead.
3189   if (ToBeExploredFrom.empty() &&
3190       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3191       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3192         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3193       }))
3194     return indicatePessimisticFixpoint();
3195   return Change;
3196 }
3197 
3198 /// Liveness information for a call sites.
3199 struct AAIsDeadCallSite final : AAIsDeadFunction {
3200   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3201       : AAIsDeadFunction(IRP, A) {}
3202 
3203   /// See AbstractAttribute::initialize(...).
3204   void initialize(Attributor &A) override {
3205     // TODO: Once we have call site specific value information we can provide
3206     //       call site specific liveness information and then it makes
3207     //       sense to specialize attributes for call sites instead of
3208     //       redirecting requests to the callee.
3209     llvm_unreachable("Abstract attributes for liveness are not "
3210                      "supported for call sites yet!");
3211   }
3212 
3213   /// See AbstractAttribute::updateImpl(...).
3214   ChangeStatus updateImpl(Attributor &A) override {
3215     return indicatePessimisticFixpoint();
3216   }
3217 
3218   /// See AbstractAttribute::trackStatistics()
3219   void trackStatistics() const override {}
3220 };
3221 
3222 /// -------------------- Dereferenceable Argument Attribute --------------------
3223 
3224 template <>
3225 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3226                                                      const DerefState &R) {
3227   ChangeStatus CS0 =
3228       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3229   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3230   return CS0 | CS1;
3231 }
3232 
3233 struct AADereferenceableImpl : AADereferenceable {
3234   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3235       : AADereferenceable(IRP, A) {}
3236   using StateType = DerefState;
3237 
3238   /// See AbstractAttribute::initialize(...).
3239   void initialize(Attributor &A) override {
3240     SmallVector<Attribute, 4> Attrs;
3241     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3242              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3243     for (const Attribute &Attr : Attrs)
3244       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3245 
3246     const IRPosition &IRP = this->getIRPosition();
3247     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP,
3248                                        /* TrackDependence */ false);
3249 
3250     bool CanBeNull;
3251     takeKnownDerefBytesMaximum(
3252         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3253             A.getDataLayout(), CanBeNull));
3254 
3255     bool IsFnInterface = IRP.isFnInterfaceKind();
3256     Function *FnScope = IRP.getAnchorScope();
3257     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3258       indicatePessimisticFixpoint();
3259       return;
3260     }
3261 
3262     if (Instruction *CtxI = getCtxI())
3263       followUsesInMBEC(*this, A, getState(), *CtxI);
3264   }
3265 
3266   /// See AbstractAttribute::getState()
3267   /// {
3268   StateType &getState() override { return *this; }
3269   const StateType &getState() const override { return *this; }
3270   /// }
3271 
3272   /// Helper function for collecting accessed bytes in must-be-executed-context
3273   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3274                               DerefState &State) {
3275     const Value *UseV = U->get();
3276     if (!UseV->getType()->isPointerTy())
3277       return;
3278 
3279     Type *PtrTy = UseV->getType();
3280     const DataLayout &DL = A.getDataLayout();
3281     int64_t Offset;
3282     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3283             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3284       if (Base == &getAssociatedValue() &&
3285           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3286         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3287         State.addAccessedBytes(Offset, Size);
3288       }
3289     }
3290     return;
3291   }
3292 
3293   /// See followUsesInMBEC
3294   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3295                        AADereferenceable::StateType &State) {
3296     bool IsNonNull = false;
3297     bool TrackUse = false;
3298     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3299         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3300 
3301     addAccessedBytesForUse(A, U, I, State);
3302     State.takeKnownDerefBytesMaximum(DerefBytes);
3303     return TrackUse;
3304   }
3305 
3306   /// See AbstractAttribute::manifest(...).
3307   ChangeStatus manifest(Attributor &A) override {
3308     ChangeStatus Change = AADereferenceable::manifest(A);
3309     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3310       removeAttrs({Attribute::DereferenceableOrNull});
3311       return ChangeStatus::CHANGED;
3312     }
3313     return Change;
3314   }
3315 
3316   void getDeducedAttributes(LLVMContext &Ctx,
3317                             SmallVectorImpl<Attribute> &Attrs) const override {
3318     // TODO: Add *_globally support
3319     if (isAssumedNonNull())
3320       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3321           Ctx, getAssumedDereferenceableBytes()));
3322     else
3323       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3324           Ctx, getAssumedDereferenceableBytes()));
3325   }
3326 
3327   /// See AbstractAttribute::getAsStr().
3328   const std::string getAsStr() const override {
3329     if (!getAssumedDereferenceableBytes())
3330       return "unknown-dereferenceable";
3331     return std::string("dereferenceable") +
3332            (isAssumedNonNull() ? "" : "_or_null") +
3333            (isAssumedGlobal() ? "_globally" : "") + "<" +
3334            std::to_string(getKnownDereferenceableBytes()) + "-" +
3335            std::to_string(getAssumedDereferenceableBytes()) + ">";
3336   }
3337 };
3338 
3339 /// Dereferenceable attribute for a floating value.
3340 struct AADereferenceableFloating : AADereferenceableImpl {
3341   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3342       : AADereferenceableImpl(IRP, A) {}
3343 
3344   /// See AbstractAttribute::updateImpl(...).
3345   ChangeStatus updateImpl(Attributor &A) override {
3346     const DataLayout &DL = A.getDataLayout();
3347 
3348     auto VisitValueCB = [&](Value &V, const Instruction *, DerefState &T,
3349                             bool Stripped) -> bool {
3350       unsigned IdxWidth =
3351           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3352       APInt Offset(IdxWidth, 0);
3353       const Value *Base =
3354           V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
3355 
3356       const auto &AA =
3357           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3358       int64_t DerefBytes = 0;
3359       if (!Stripped && this == &AA) {
3360         // Use IR information if we did not strip anything.
3361         // TODO: track globally.
3362         bool CanBeNull;
3363         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3364         T.GlobalState.indicatePessimisticFixpoint();
3365       } else {
3366         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3367         DerefBytes = DS.DerefBytesState.getAssumed();
3368         T.GlobalState &= DS.GlobalState;
3369       }
3370 
3371       // TODO: Use `AAConstantRange` to infer dereferenceable bytes.
3372 
3373       // For now we do not try to "increase" dereferenceability due to negative
3374       // indices as we first have to come up with code to deal with loops and
3375       // for overflows of the dereferenceable bytes.
3376       int64_t OffsetSExt = Offset.getSExtValue();
3377       if (OffsetSExt < 0)
3378         OffsetSExt = 0;
3379 
3380       T.takeAssumedDerefBytesMinimum(
3381           std::max(int64_t(0), DerefBytes - OffsetSExt));
3382 
3383       if (this == &AA) {
3384         if (!Stripped) {
3385           // If nothing was stripped IR information is all we got.
3386           T.takeKnownDerefBytesMaximum(
3387               std::max(int64_t(0), DerefBytes - OffsetSExt));
3388           T.indicatePessimisticFixpoint();
3389         } else if (OffsetSExt > 0) {
3390           // If something was stripped but there is circular reasoning we look
3391           // for the offset. If it is positive we basically decrease the
3392           // dereferenceable bytes in a circluar loop now, which will simply
3393           // drive them down to the known value in a very slow way which we
3394           // can accelerate.
3395           T.indicatePessimisticFixpoint();
3396         }
3397       }
3398 
3399       return T.isValidState();
3400     };
3401 
3402     DerefState T;
3403     if (!genericValueTraversal<AADereferenceable, DerefState>(
3404             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3405       return indicatePessimisticFixpoint();
3406 
3407     return clampStateAndIndicateChange(getState(), T);
3408   }
3409 
3410   /// See AbstractAttribute::trackStatistics()
3411   void trackStatistics() const override {
3412     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3413   }
3414 };
3415 
3416 /// Dereferenceable attribute for a return value.
3417 struct AADereferenceableReturned final
3418     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3419   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3420       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3421             IRP, A) {}
3422 
3423   /// See AbstractAttribute::trackStatistics()
3424   void trackStatistics() const override {
3425     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3426   }
3427 };
3428 
3429 /// Dereferenceable attribute for an argument
3430 struct AADereferenceableArgument final
3431     : AAArgumentFromCallSiteArguments<AADereferenceable,
3432                                       AADereferenceableImpl> {
3433   using Base =
3434       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3435   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3436       : Base(IRP, A) {}
3437 
3438   /// See AbstractAttribute::trackStatistics()
3439   void trackStatistics() const override {
3440     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3441   }
3442 };
3443 
3444 /// Dereferenceable attribute for a call site argument.
3445 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3446   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3447       : AADereferenceableFloating(IRP, A) {}
3448 
3449   /// See AbstractAttribute::trackStatistics()
3450   void trackStatistics() const override {
3451     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3452   }
3453 };
3454 
3455 /// Dereferenceable attribute deduction for a call site return value.
3456 struct AADereferenceableCallSiteReturned final
3457     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3458   using Base =
3459       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3460   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3461       : Base(IRP, A) {}
3462 
3463   /// See AbstractAttribute::trackStatistics()
3464   void trackStatistics() const override {
3465     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3466   }
3467 };
3468 
3469 // ------------------------ Align Argument Attribute ------------------------
3470 
3471 /// \p Ptr is accessed so we can get alignment information if the ABI requires
3472 /// the element type to be aligned.
3473 static MaybeAlign getKnownAlignmentFromAccessedPtr(const Value *Ptr,
3474                                                    const DataLayout &DL) {
3475   MaybeAlign KnownAlignment = Ptr->getPointerAlignment(DL);
3476   Type *ElementTy = Ptr->getType()->getPointerElementType();
3477   if (ElementTy->isSized())
3478     KnownAlignment = max(KnownAlignment, DL.getABITypeAlign(ElementTy));
3479   return KnownAlignment;
3480 }
3481 
3482 static unsigned getKnownAlignForUse(Attributor &A,
3483                                     AbstractAttribute &QueryingAA,
3484                                     Value &AssociatedValue, const Use *U,
3485                                     const Instruction *I, bool &TrackUse) {
3486   // We need to follow common pointer manipulation uses to the accesses they
3487   // feed into.
3488   if (isa<CastInst>(I)) {
3489     // Follow all but ptr2int casts.
3490     TrackUse = !isa<PtrToIntInst>(I);
3491     return 0;
3492   }
3493   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3494     if (GEP->hasAllConstantIndices()) {
3495       TrackUse = true;
3496       return 0;
3497     }
3498   }
3499 
3500   MaybeAlign MA;
3501   if (const auto *CB = dyn_cast<CallBase>(I)) {
3502     if (CB->isBundleOperand(U) || CB->isCallee(U))
3503       return 0;
3504 
3505     unsigned ArgNo = CB->getArgOperandNo(U);
3506     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3507     // As long as we only use known information there is no need to track
3508     // dependences here.
3509     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3510                                         /* TrackDependence */ false);
3511     MA = MaybeAlign(AlignAA.getKnownAlign());
3512   }
3513 
3514   const DataLayout &DL = A.getDataLayout();
3515   const Value *UseV = U->get();
3516   if (auto *SI = dyn_cast<StoreInst>(I)) {
3517     if (SI->getPointerOperand() == UseV) {
3518       if (unsigned SIAlign = SI->getAlignment())
3519         MA = MaybeAlign(SIAlign);
3520       else
3521         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3522     }
3523   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3524     if (LI->getPointerOperand() == UseV) {
3525       if (unsigned LIAlign = LI->getAlignment())
3526         MA = MaybeAlign(LIAlign);
3527       else
3528         MA = getKnownAlignmentFromAccessedPtr(UseV, DL);
3529     }
3530   }
3531 
3532   if (!MA.hasValue() || MA <= 1)
3533     return 0;
3534 
3535   unsigned Alignment = MA->value();
3536   int64_t Offset;
3537 
3538   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3539     if (Base == &AssociatedValue) {
3540       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3541       // So we can say that the maximum power of two which is a divisor of
3542       // gcd(Offset, Alignment) is an alignment.
3543 
3544       uint32_t gcd =
3545           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3546       Alignment = llvm::PowerOf2Floor(gcd);
3547     }
3548   }
3549 
3550   return Alignment;
3551 }
3552 
3553 struct AAAlignImpl : AAAlign {
3554   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3555 
3556   /// See AbstractAttribute::initialize(...).
3557   void initialize(Attributor &A) override {
3558     SmallVector<Attribute, 4> Attrs;
3559     getAttrs({Attribute::Alignment}, Attrs);
3560     for (const Attribute &Attr : Attrs)
3561       takeKnownMaximum(Attr.getValueAsInt());
3562 
3563     Value &V = getAssociatedValue();
3564     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3565     //       use of the function pointer. This was caused by D73131. We want to
3566     //       avoid this for function pointers especially because we iterate
3567     //       their uses and int2ptr is not handled. It is not a correctness
3568     //       problem though!
3569     if (!V.getType()->getPointerElementType()->isFunctionTy())
3570       takeKnownMaximum(
3571           V.getPointerAlignment(A.getDataLayout()).valueOrOne().value());
3572 
3573     if (getIRPosition().isFnInterfaceKind() &&
3574         (!getAnchorScope() ||
3575          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3576       indicatePessimisticFixpoint();
3577       return;
3578     }
3579 
3580     if (Instruction *CtxI = getCtxI())
3581       followUsesInMBEC(*this, A, getState(), *CtxI);
3582   }
3583 
3584   /// See AbstractAttribute::manifest(...).
3585   ChangeStatus manifest(Attributor &A) override {
3586     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3587 
3588     // Check for users that allow alignment annotations.
3589     Value &AssociatedValue = getAssociatedValue();
3590     for (const Use &U : AssociatedValue.uses()) {
3591       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3592         if (SI->getPointerOperand() == &AssociatedValue)
3593           if (SI->getAlignment() < getAssumedAlign()) {
3594             STATS_DECLTRACK(AAAlign, Store,
3595                             "Number of times alignment added to a store");
3596             SI->setAlignment(Align(getAssumedAlign()));
3597             LoadStoreChanged = ChangeStatus::CHANGED;
3598           }
3599       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3600         if (LI->getPointerOperand() == &AssociatedValue)
3601           if (LI->getAlignment() < getAssumedAlign()) {
3602             LI->setAlignment(Align(getAssumedAlign()));
3603             STATS_DECLTRACK(AAAlign, Load,
3604                             "Number of times alignment added to a load");
3605             LoadStoreChanged = ChangeStatus::CHANGED;
3606           }
3607       }
3608     }
3609 
3610     ChangeStatus Changed = AAAlign::manifest(A);
3611 
3612     MaybeAlign InheritAlign =
3613         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3614     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3615       return LoadStoreChanged;
3616     return Changed | LoadStoreChanged;
3617   }
3618 
3619   // TODO: Provide a helper to determine the implied ABI alignment and check in
3620   //       the existing manifest method and a new one for AAAlignImpl that value
3621   //       to avoid making the alignment explicit if it did not improve.
3622 
3623   /// See AbstractAttribute::getDeducedAttributes
3624   virtual void
3625   getDeducedAttributes(LLVMContext &Ctx,
3626                        SmallVectorImpl<Attribute> &Attrs) const override {
3627     if (getAssumedAlign() > 1)
3628       Attrs.emplace_back(
3629           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3630   }
3631 
3632   /// See followUsesInMBEC
3633   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3634                        AAAlign::StateType &State) {
3635     bool TrackUse = false;
3636 
3637     unsigned int KnownAlign =
3638         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3639     State.takeKnownMaximum(KnownAlign);
3640 
3641     return TrackUse;
3642   }
3643 
3644   /// See AbstractAttribute::getAsStr().
3645   const std::string getAsStr() const override {
3646     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3647                                 "-" + std::to_string(getAssumedAlign()) + ">")
3648                              : "unknown-align";
3649   }
3650 };
3651 
3652 /// Align attribute for a floating value.
3653 struct AAAlignFloating : AAAlignImpl {
3654   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3655 
3656   /// See AbstractAttribute::updateImpl(...).
3657   ChangeStatus updateImpl(Attributor &A) override {
3658     const DataLayout &DL = A.getDataLayout();
3659 
3660     auto VisitValueCB = [&](Value &V, const Instruction *,
3661                             AAAlign::StateType &T, bool Stripped) -> bool {
3662       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3663       if (!Stripped && this == &AA) {
3664         // Use only IR information if we did not strip anything.
3665         const MaybeAlign PA = V.getPointerAlignment(DL);
3666         T.takeKnownMaximum(PA ? PA->value() : 0);
3667         T.indicatePessimisticFixpoint();
3668       } else {
3669         // Use abstract attribute information.
3670         const AAAlign::StateType &DS =
3671             static_cast<const AAAlign::StateType &>(AA.getState());
3672         T ^= DS;
3673       }
3674       return T.isValidState();
3675     };
3676 
3677     StateType T;
3678     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3679                                                    VisitValueCB, getCtxI()))
3680       return indicatePessimisticFixpoint();
3681 
3682     // TODO: If we know we visited all incoming values, thus no are assumed
3683     // dead, we can take the known information from the state T.
3684     return clampStateAndIndicateChange(getState(), T);
3685   }
3686 
3687   /// See AbstractAttribute::trackStatistics()
3688   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3689 };
3690 
3691 /// Align attribute for function return value.
3692 struct AAAlignReturned final
3693     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3694   AAAlignReturned(const IRPosition &IRP, Attributor &A)
3695       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {}
3696 
3697   /// See AbstractAttribute::trackStatistics()
3698   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3699 };
3700 
3701 /// Align attribute for function argument.
3702 struct AAAlignArgument final
3703     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3704   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3705   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3706 
3707   /// See AbstractAttribute::manifest(...).
3708   ChangeStatus manifest(Attributor &A) override {
3709     // If the associated argument is involved in a must-tail call we give up
3710     // because we would need to keep the argument alignments of caller and
3711     // callee in-sync. Just does not seem worth the trouble right now.
3712     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3713       return ChangeStatus::UNCHANGED;
3714     return Base::manifest(A);
3715   }
3716 
3717   /// See AbstractAttribute::trackStatistics()
3718   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3719 };
3720 
3721 struct AAAlignCallSiteArgument final : AAAlignFloating {
3722   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3723       : AAAlignFloating(IRP, A) {}
3724 
3725   /// See AbstractAttribute::manifest(...).
3726   ChangeStatus manifest(Attributor &A) override {
3727     // If the associated argument is involved in a must-tail call we give up
3728     // because we would need to keep the argument alignments of caller and
3729     // callee in-sync. Just does not seem worth the trouble right now.
3730     if (Argument *Arg = getAssociatedArgument())
3731       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3732         return ChangeStatus::UNCHANGED;
3733     ChangeStatus Changed = AAAlignImpl::manifest(A);
3734     MaybeAlign InheritAlign =
3735         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3736     if (InheritAlign.valueOrOne() >= getAssumedAlign())
3737       Changed = ChangeStatus::UNCHANGED;
3738     return Changed;
3739   }
3740 
3741   /// See AbstractAttribute::updateImpl(Attributor &A).
3742   ChangeStatus updateImpl(Attributor &A) override {
3743     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3744     if (Argument *Arg = getAssociatedArgument()) {
3745       // We only take known information from the argument
3746       // so we do not need to track a dependence.
3747       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3748           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3749       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3750     }
3751     return Changed;
3752   }
3753 
3754   /// See AbstractAttribute::trackStatistics()
3755   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3756 };
3757 
3758 /// Align attribute deduction for a call site return value.
3759 struct AAAlignCallSiteReturned final
3760     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3761   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3762   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3763       : Base(IRP, A) {}
3764 
3765   /// See AbstractAttribute::initialize(...).
3766   void initialize(Attributor &A) override {
3767     Base::initialize(A);
3768     Function *F = getAssociatedFunction();
3769     if (!F)
3770       indicatePessimisticFixpoint();
3771   }
3772 
3773   /// See AbstractAttribute::trackStatistics()
3774   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3775 };
3776 
3777 /// ------------------ Function No-Return Attribute ----------------------------
3778 struct AANoReturnImpl : public AANoReturn {
3779   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3780 
3781   /// See AbstractAttribute::initialize(...).
3782   void initialize(Attributor &A) override {
3783     AANoReturn::initialize(A);
3784     Function *F = getAssociatedFunction();
3785     if (!F)
3786       indicatePessimisticFixpoint();
3787   }
3788 
3789   /// See AbstractAttribute::getAsStr().
3790   const std::string getAsStr() const override {
3791     return getAssumed() ? "noreturn" : "may-return";
3792   }
3793 
3794   /// See AbstractAttribute::updateImpl(Attributor &A).
3795   virtual ChangeStatus updateImpl(Attributor &A) override {
3796     auto CheckForNoReturn = [](Instruction &) { return false; };
3797     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3798                                    {(unsigned)Instruction::Ret}))
3799       return indicatePessimisticFixpoint();
3800     return ChangeStatus::UNCHANGED;
3801   }
3802 };
3803 
3804 struct AANoReturnFunction final : AANoReturnImpl {
3805   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
3806       : AANoReturnImpl(IRP, A) {}
3807 
3808   /// See AbstractAttribute::trackStatistics()
3809   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3810 };
3811 
3812 /// NoReturn attribute deduction for a call sites.
3813 struct AANoReturnCallSite final : AANoReturnImpl {
3814   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
3815       : AANoReturnImpl(IRP, A) {}
3816 
3817   /// See AbstractAttribute::updateImpl(...).
3818   ChangeStatus updateImpl(Attributor &A) override {
3819     // TODO: Once we have call site specific value information we can provide
3820     //       call site specific liveness information and then it makes
3821     //       sense to specialize attributes for call sites arguments instead of
3822     //       redirecting requests to the callee argument.
3823     Function *F = getAssociatedFunction();
3824     const IRPosition &FnPos = IRPosition::function(*F);
3825     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3826     return clampStateAndIndicateChange(
3827         getState(),
3828         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3829   }
3830 
3831   /// See AbstractAttribute::trackStatistics()
3832   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3833 };
3834 
3835 /// ----------------------- Variable Capturing ---------------------------------
3836 
3837 /// A class to hold the state of for no-capture attributes.
3838 struct AANoCaptureImpl : public AANoCapture {
3839   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
3840 
3841   /// See AbstractAttribute::initialize(...).
3842   void initialize(Attributor &A) override {
3843     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3844       indicateOptimisticFixpoint();
3845       return;
3846     }
3847     Function *AnchorScope = getAnchorScope();
3848     if (isFnInterfaceKind() &&
3849         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3850       indicatePessimisticFixpoint();
3851       return;
3852     }
3853 
3854     // You cannot "capture" null in the default address space.
3855     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
3856         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
3857       indicateOptimisticFixpoint();
3858       return;
3859     }
3860 
3861     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
3862 
3863     // Check what state the associated function can actually capture.
3864     if (F)
3865       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
3866     else
3867       indicatePessimisticFixpoint();
3868   }
3869 
3870   /// See AbstractAttribute::updateImpl(...).
3871   ChangeStatus updateImpl(Attributor &A) override;
3872 
3873   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
3874   virtual void
3875   getDeducedAttributes(LLVMContext &Ctx,
3876                        SmallVectorImpl<Attribute> &Attrs) const override {
3877     if (!isAssumedNoCaptureMaybeReturned())
3878       return;
3879 
3880     if (getArgNo() >= 0) {
3881       if (isAssumedNoCapture())
3882         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
3883       else if (ManifestInternal)
3884         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
3885     }
3886   }
3887 
3888   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
3889   /// depending on the ability of the function associated with \p IRP to capture
3890   /// state in memory and through "returning/throwing", respectively.
3891   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
3892                                                    const Function &F,
3893                                                    BitIntegerState &State) {
3894     // TODO: Once we have memory behavior attributes we should use them here.
3895 
3896     // If we know we cannot communicate or write to memory, we do not care about
3897     // ptr2int anymore.
3898     if (F.onlyReadsMemory() && F.doesNotThrow() &&
3899         F.getReturnType()->isVoidTy()) {
3900       State.addKnownBits(NO_CAPTURE);
3901       return;
3902     }
3903 
3904     // A function cannot capture state in memory if it only reads memory, it can
3905     // however return/throw state and the state might be influenced by the
3906     // pointer value, e.g., loading from a returned pointer might reveal a bit.
3907     if (F.onlyReadsMemory())
3908       State.addKnownBits(NOT_CAPTURED_IN_MEM);
3909 
3910     // A function cannot communicate state back if it does not through
3911     // exceptions and doesn not return values.
3912     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
3913       State.addKnownBits(NOT_CAPTURED_IN_RET);
3914 
3915     // Check existing "returned" attributes.
3916     int ArgNo = IRP.getArgNo();
3917     if (F.doesNotThrow() && ArgNo >= 0) {
3918       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
3919         if (F.hasParamAttribute(u, Attribute::Returned)) {
3920           if (u == unsigned(ArgNo))
3921             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
3922           else if (F.onlyReadsMemory())
3923             State.addKnownBits(NO_CAPTURE);
3924           else
3925             State.addKnownBits(NOT_CAPTURED_IN_RET);
3926           break;
3927         }
3928     }
3929   }
3930 
3931   /// See AbstractState::getAsStr().
3932   const std::string getAsStr() const override {
3933     if (isKnownNoCapture())
3934       return "known not-captured";
3935     if (isAssumedNoCapture())
3936       return "assumed not-captured";
3937     if (isKnownNoCaptureMaybeReturned())
3938       return "known not-captured-maybe-returned";
3939     if (isAssumedNoCaptureMaybeReturned())
3940       return "assumed not-captured-maybe-returned";
3941     return "assumed-captured";
3942   }
3943 };
3944 
3945 /// Attributor-aware capture tracker.
3946 struct AACaptureUseTracker final : public CaptureTracker {
3947 
3948   /// Create a capture tracker that can lookup in-flight abstract attributes
3949   /// through the Attributor \p A.
3950   ///
3951   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
3952   /// search is stopped. If a use leads to a return instruction,
3953   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
3954   /// If a use leads to a ptr2int which may capture the value,
3955   /// \p CapturedInInteger is set. If a use is found that is currently assumed
3956   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
3957   /// set. All values in \p PotentialCopies are later tracked as well. For every
3958   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
3959   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
3960   /// conservatively set to true.
3961   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
3962                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
3963                       SmallVectorImpl<const Value *> &PotentialCopies,
3964                       unsigned &RemainingUsesToExplore)
3965       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
3966         PotentialCopies(PotentialCopies),
3967         RemainingUsesToExplore(RemainingUsesToExplore) {}
3968 
3969   /// Determine if \p V maybe captured. *Also updates the state!*
3970   bool valueMayBeCaptured(const Value *V) {
3971     if (V->getType()->isPointerTy()) {
3972       PointerMayBeCaptured(V, this);
3973     } else {
3974       State.indicatePessimisticFixpoint();
3975     }
3976     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
3977   }
3978 
3979   /// See CaptureTracker::tooManyUses().
3980   void tooManyUses() override {
3981     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
3982   }
3983 
3984   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
3985     if (CaptureTracker::isDereferenceableOrNull(O, DL))
3986       return true;
3987     const auto &DerefAA = A.getAAFor<AADereferenceable>(
3988         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
3989         DepClassTy::OPTIONAL);
3990     return DerefAA.getAssumedDereferenceableBytes();
3991   }
3992 
3993   /// See CaptureTracker::captured(...).
3994   bool captured(const Use *U) override {
3995     Instruction *UInst = cast<Instruction>(U->getUser());
3996     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
3997                       << "\n");
3998 
3999     // Because we may reuse the tracker multiple times we keep track of the
4000     // number of explored uses ourselves as well.
4001     if (RemainingUsesToExplore-- == 0) {
4002       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4003       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4004                           /* Return */ true);
4005     }
4006 
4007     // Deal with ptr2int by following uses.
4008     if (isa<PtrToIntInst>(UInst)) {
4009       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4010       return valueMayBeCaptured(UInst);
4011     }
4012 
4013     // Explicitly catch return instructions.
4014     if (isa<ReturnInst>(UInst))
4015       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4016                           /* Return */ true);
4017 
4018     // For now we only use special logic for call sites. However, the tracker
4019     // itself knows about a lot of other non-capturing cases already.
4020     auto *CB = dyn_cast<CallBase>(UInst);
4021     if (!CB || !CB->isArgOperand(U))
4022       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4023                           /* Return */ true);
4024 
4025     unsigned ArgNo = CB->getArgOperandNo(U);
4026     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4027     // If we have a abstract no-capture attribute for the argument we can use
4028     // it to justify a non-capture attribute here. This allows recursion!
4029     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4030     if (ArgNoCaptureAA.isAssumedNoCapture())
4031       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4032                           /* Return */ false);
4033     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4034       addPotentialCopy(*CB);
4035       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4036                           /* Return */ false);
4037     }
4038 
4039     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4040     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4041                         /* Return */ true);
4042   }
4043 
4044   /// Register \p CS as potential copy of the value we are checking.
4045   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4046 
4047   /// See CaptureTracker::shouldExplore(...).
4048   bool shouldExplore(const Use *U) override {
4049     // Check liveness and ignore droppable users.
4050     return !U->getUser()->isDroppable() &&
4051            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4052   }
4053 
4054   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4055   /// \p CapturedInRet, then return the appropriate value for use in the
4056   /// CaptureTracker::captured() interface.
4057   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4058                     bool CapturedInRet) {
4059     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4060                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4061     if (CapturedInMem)
4062       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4063     if (CapturedInInt)
4064       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4065     if (CapturedInRet)
4066       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4067     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4068   }
4069 
4070 private:
4071   /// The attributor providing in-flight abstract attributes.
4072   Attributor &A;
4073 
4074   /// The abstract attribute currently updated.
4075   AANoCapture &NoCaptureAA;
4076 
4077   /// The abstract liveness state.
4078   const AAIsDead &IsDeadAA;
4079 
4080   /// The state currently updated.
4081   AANoCapture::StateType &State;
4082 
4083   /// Set of potential copies of the tracked value.
4084   SmallVectorImpl<const Value *> &PotentialCopies;
4085 
4086   /// Global counter to limit the number of explored uses.
4087   unsigned &RemainingUsesToExplore;
4088 };
4089 
4090 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4091   const IRPosition &IRP = getIRPosition();
4092   const Value *V =
4093       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4094   if (!V)
4095     return indicatePessimisticFixpoint();
4096 
4097   const Function *F =
4098       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4099   assert(F && "Expected a function!");
4100   const IRPosition &FnPos = IRPosition::function(*F);
4101   const auto &IsDeadAA =
4102       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4103 
4104   AANoCapture::StateType T;
4105 
4106   // Readonly means we cannot capture through memory.
4107   const auto &FnMemAA =
4108       A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false);
4109   if (FnMemAA.isAssumedReadOnly()) {
4110     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4111     if (FnMemAA.isKnownReadOnly())
4112       addKnownBits(NOT_CAPTURED_IN_MEM);
4113     else
4114       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4115   }
4116 
4117   // Make sure all returned values are different than the underlying value.
4118   // TODO: we could do this in a more sophisticated way inside
4119   //       AAReturnedValues, e.g., track all values that escape through returns
4120   //       directly somehow.
4121   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4122     bool SeenConstant = false;
4123     for (auto &It : RVAA.returned_values()) {
4124       if (isa<Constant>(It.first)) {
4125         if (SeenConstant)
4126           return false;
4127         SeenConstant = true;
4128       } else if (!isa<Argument>(It.first) ||
4129                  It.first == getAssociatedArgument())
4130         return false;
4131     }
4132     return true;
4133   };
4134 
4135   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4136       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4137   if (NoUnwindAA.isAssumedNoUnwind()) {
4138     bool IsVoidTy = F->getReturnType()->isVoidTy();
4139     const AAReturnedValues *RVAA =
4140         IsVoidTy ? nullptr
4141                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4142                                                  /* TrackDependence */ true,
4143                                                  DepClassTy::OPTIONAL);
4144     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4145       T.addKnownBits(NOT_CAPTURED_IN_RET);
4146       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4147         return ChangeStatus::UNCHANGED;
4148       if (NoUnwindAA.isKnownNoUnwind() &&
4149           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4150         addKnownBits(NOT_CAPTURED_IN_RET);
4151         if (isKnown(NOT_CAPTURED_IN_MEM))
4152           return indicateOptimisticFixpoint();
4153       }
4154     }
4155   }
4156 
4157   // Use the CaptureTracker interface and logic with the specialized tracker,
4158   // defined in AACaptureUseTracker, that can look at in-flight abstract
4159   // attributes and directly updates the assumed state.
4160   SmallVector<const Value *, 4> PotentialCopies;
4161   unsigned RemainingUsesToExplore =
4162       getDefaultMaxUsesToExploreForCaptureTracking();
4163   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4164                               RemainingUsesToExplore);
4165 
4166   // Check all potential copies of the associated value until we can assume
4167   // none will be captured or we have to assume at least one might be.
4168   unsigned Idx = 0;
4169   PotentialCopies.push_back(V);
4170   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4171     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4172 
4173   AANoCapture::StateType &S = getState();
4174   auto Assumed = S.getAssumed();
4175   S.intersectAssumedBits(T.getAssumed());
4176   if (!isAssumedNoCaptureMaybeReturned())
4177     return indicatePessimisticFixpoint();
4178   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4179                                    : ChangeStatus::CHANGED;
4180 }
4181 
4182 /// NoCapture attribute for function arguments.
4183 struct AANoCaptureArgument final : AANoCaptureImpl {
4184   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4185       : AANoCaptureImpl(IRP, A) {}
4186 
4187   /// See AbstractAttribute::trackStatistics()
4188   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4189 };
4190 
4191 /// NoCapture attribute for call site arguments.
4192 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4193   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4194       : AANoCaptureImpl(IRP, A) {}
4195 
4196   /// See AbstractAttribute::initialize(...).
4197   void initialize(Attributor &A) override {
4198     if (Argument *Arg = getAssociatedArgument())
4199       if (Arg->hasByValAttr())
4200         indicateOptimisticFixpoint();
4201     AANoCaptureImpl::initialize(A);
4202   }
4203 
4204   /// See AbstractAttribute::updateImpl(...).
4205   ChangeStatus updateImpl(Attributor &A) override {
4206     // TODO: Once we have call site specific value information we can provide
4207     //       call site specific liveness information and then it makes
4208     //       sense to specialize attributes for call sites arguments instead of
4209     //       redirecting requests to the callee argument.
4210     Argument *Arg = getAssociatedArgument();
4211     if (!Arg)
4212       return indicatePessimisticFixpoint();
4213     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4214     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4215     return clampStateAndIndicateChange(
4216         getState(),
4217         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4218   }
4219 
4220   /// See AbstractAttribute::trackStatistics()
4221   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4222 };
4223 
4224 /// NoCapture attribute for floating values.
4225 struct AANoCaptureFloating final : AANoCaptureImpl {
4226   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4227       : AANoCaptureImpl(IRP, A) {}
4228 
4229   /// See AbstractAttribute::trackStatistics()
4230   void trackStatistics() const override {
4231     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4232   }
4233 };
4234 
4235 /// NoCapture attribute for function return value.
4236 struct AANoCaptureReturned final : AANoCaptureImpl {
4237   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4238       : AANoCaptureImpl(IRP, A) {
4239     llvm_unreachable("NoCapture is not applicable to function returns!");
4240   }
4241 
4242   /// See AbstractAttribute::initialize(...).
4243   void initialize(Attributor &A) override {
4244     llvm_unreachable("NoCapture is not applicable to function returns!");
4245   }
4246 
4247   /// See AbstractAttribute::updateImpl(...).
4248   ChangeStatus updateImpl(Attributor &A) override {
4249     llvm_unreachable("NoCapture is not applicable to function returns!");
4250   }
4251 
4252   /// See AbstractAttribute::trackStatistics()
4253   void trackStatistics() const override {}
4254 };
4255 
4256 /// NoCapture attribute deduction for a call site return value.
4257 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4258   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4259       : AANoCaptureImpl(IRP, A) {}
4260 
4261   /// See AbstractAttribute::trackStatistics()
4262   void trackStatistics() const override {
4263     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4264   }
4265 };
4266 
4267 /// ------------------ Value Simplify Attribute ----------------------------
4268 struct AAValueSimplifyImpl : AAValueSimplify {
4269   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4270       : AAValueSimplify(IRP, A) {}
4271 
4272   /// See AbstractAttribute::initialize(...).
4273   void initialize(Attributor &A) override {
4274     if (getAssociatedValue().getType()->isVoidTy())
4275       indicatePessimisticFixpoint();
4276   }
4277 
4278   /// See AbstractAttribute::getAsStr().
4279   const std::string getAsStr() const override {
4280     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4281                         : "not-simple";
4282   }
4283 
4284   /// See AbstractAttribute::trackStatistics()
4285   void trackStatistics() const override {}
4286 
4287   /// See AAValueSimplify::getAssumedSimplifiedValue()
4288   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4289     if (!getAssumed())
4290       return const_cast<Value *>(&getAssociatedValue());
4291     return SimplifiedAssociatedValue;
4292   }
4293 
4294   /// Helper function for querying AAValueSimplify and updating candicate.
4295   /// \param QueryingValue Value trying to unify with SimplifiedValue
4296   /// \param AccumulatedSimplifiedValue Current simplification result.
4297   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4298                              Value &QueryingValue,
4299                              Optional<Value *> &AccumulatedSimplifiedValue) {
4300     // FIXME: Add a typecast support.
4301 
4302     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4303         QueryingAA, IRPosition::value(QueryingValue));
4304 
4305     Optional<Value *> QueryingValueSimplified =
4306         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4307 
4308     if (!QueryingValueSimplified.hasValue())
4309       return true;
4310 
4311     if (!QueryingValueSimplified.getValue())
4312       return false;
4313 
4314     Value &QueryingValueSimplifiedUnwrapped =
4315         *QueryingValueSimplified.getValue();
4316 
4317     if (AccumulatedSimplifiedValue.hasValue() &&
4318         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4319         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4320       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4321     if (AccumulatedSimplifiedValue.hasValue() &&
4322         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4323       return true;
4324 
4325     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4326                       << " is assumed to be "
4327                       << QueryingValueSimplifiedUnwrapped << "\n");
4328 
4329     AccumulatedSimplifiedValue = QueryingValueSimplified;
4330     return true;
4331   }
4332 
4333   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4334     if (!getAssociatedValue().getType()->isIntegerTy())
4335       return false;
4336 
4337     const auto &ValueConstantRangeAA =
4338         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4339 
4340     Optional<ConstantInt *> COpt =
4341         ValueConstantRangeAA.getAssumedConstantInt(A);
4342     if (COpt.hasValue()) {
4343       if (auto *C = COpt.getValue())
4344         SimplifiedAssociatedValue = C;
4345       else
4346         return false;
4347     } else {
4348       SimplifiedAssociatedValue = llvm::None;
4349     }
4350     return true;
4351   }
4352 
4353   /// See AbstractAttribute::manifest(...).
4354   ChangeStatus manifest(Attributor &A) override {
4355     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4356 
4357     if (SimplifiedAssociatedValue.hasValue() &&
4358         !SimplifiedAssociatedValue.getValue())
4359       return Changed;
4360 
4361     Value &V = getAssociatedValue();
4362     auto *C = SimplifiedAssociatedValue.hasValue()
4363                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4364                   : UndefValue::get(V.getType());
4365     if (C) {
4366       // We can replace the AssociatedValue with the constant.
4367       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4368         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4369                           << " :: " << *this << "\n");
4370         if (A.changeValueAfterManifest(V, *C))
4371           Changed = ChangeStatus::CHANGED;
4372       }
4373     }
4374 
4375     return Changed | AAValueSimplify::manifest(A);
4376   }
4377 
4378   /// See AbstractState::indicatePessimisticFixpoint(...).
4379   ChangeStatus indicatePessimisticFixpoint() override {
4380     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4381     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4382     SimplifiedAssociatedValue = &getAssociatedValue();
4383     indicateOptimisticFixpoint();
4384     return ChangeStatus::CHANGED;
4385   }
4386 
4387 protected:
4388   // An assumed simplified value. Initially, it is set to Optional::None, which
4389   // means that the value is not clear under current assumption. If in the
4390   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4391   // returns orignal associated value.
4392   Optional<Value *> SimplifiedAssociatedValue;
4393 };
4394 
4395 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4396   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4397       : AAValueSimplifyImpl(IRP, A) {}
4398 
4399   void initialize(Attributor &A) override {
4400     AAValueSimplifyImpl::initialize(A);
4401     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4402       indicatePessimisticFixpoint();
4403     if (hasAttr({Attribute::InAlloca, Attribute::StructRet, Attribute::Nest},
4404                 /* IgnoreSubsumingPositions */ true))
4405       indicatePessimisticFixpoint();
4406 
4407     // FIXME: This is a hack to prevent us from propagating function poiner in
4408     // the new pass manager CGSCC pass as it creates call edges the
4409     // CallGraphUpdater cannot handle yet.
4410     Value &V = getAssociatedValue();
4411     if (V.getType()->isPointerTy() &&
4412         V.getType()->getPointerElementType()->isFunctionTy() &&
4413         !A.isModulePass())
4414       indicatePessimisticFixpoint();
4415   }
4416 
4417   /// See AbstractAttribute::updateImpl(...).
4418   ChangeStatus updateImpl(Attributor &A) override {
4419     // Byval is only replacable if it is readonly otherwise we would write into
4420     // the replaced value and not the copy that byval creates implicitly.
4421     Argument *Arg = getAssociatedArgument();
4422     if (Arg->hasByValAttr()) {
4423       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4424       //       there is no race by not copying a constant byval.
4425       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4426       if (!MemAA.isAssumedReadOnly())
4427         return indicatePessimisticFixpoint();
4428     }
4429 
4430     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4431 
4432     auto PredForCallSite = [&](AbstractCallSite ACS) {
4433       const IRPosition &ACSArgPos =
4434           IRPosition::callsite_argument(ACS, getArgNo());
4435       // Check if a coresponding argument was found or if it is on not
4436       // associated (which can happen for callback calls).
4437       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4438         return false;
4439 
4440       // We can only propagate thread independent values through callbacks.
4441       // This is different to direct/indirect call sites because for them we
4442       // know the thread executing the caller and callee is the same. For
4443       // callbacks this is not guaranteed, thus a thread dependent value could
4444       // be different for the caller and callee, making it invalid to propagate.
4445       Value &ArgOp = ACSArgPos.getAssociatedValue();
4446       if (ACS.isCallbackCall())
4447         if (auto *C = dyn_cast<Constant>(&ArgOp))
4448           if (C->isThreadDependent())
4449             return false;
4450       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4451     };
4452 
4453     bool AllCallSitesKnown;
4454     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4455                                 AllCallSitesKnown))
4456       if (!askSimplifiedValueForAAValueConstantRange(A))
4457         return indicatePessimisticFixpoint();
4458 
4459     // If a candicate was found in this update, return CHANGED.
4460     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4461                ? ChangeStatus::UNCHANGED
4462                : ChangeStatus ::CHANGED;
4463   }
4464 
4465   /// See AbstractAttribute::trackStatistics()
4466   void trackStatistics() const override {
4467     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4468   }
4469 };
4470 
4471 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4472   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4473       : AAValueSimplifyImpl(IRP, A) {}
4474 
4475   /// See AbstractAttribute::updateImpl(...).
4476   ChangeStatus updateImpl(Attributor &A) override {
4477     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4478 
4479     auto PredForReturned = [&](Value &V) {
4480       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4481     };
4482 
4483     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4484       if (!askSimplifiedValueForAAValueConstantRange(A))
4485         return indicatePessimisticFixpoint();
4486 
4487     // If a candicate was found in this update, return CHANGED.
4488     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4489                ? ChangeStatus::UNCHANGED
4490                : ChangeStatus ::CHANGED;
4491   }
4492 
4493   ChangeStatus manifest(Attributor &A) override {
4494     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4495 
4496     if (SimplifiedAssociatedValue.hasValue() &&
4497         !SimplifiedAssociatedValue.getValue())
4498       return Changed;
4499 
4500     Value &V = getAssociatedValue();
4501     auto *C = SimplifiedAssociatedValue.hasValue()
4502                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4503                   : UndefValue::get(V.getType());
4504     if (C) {
4505       auto PredForReturned =
4506           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4507             // We can replace the AssociatedValue with the constant.
4508             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4509               return true;
4510 
4511             for (ReturnInst *RI : RetInsts) {
4512               if (RI->getFunction() != getAnchorScope())
4513                 continue;
4514               auto *RC = C;
4515               if (RC->getType() != RI->getReturnValue()->getType())
4516                 RC = ConstantExpr::getBitCast(RC,
4517                                               RI->getReturnValue()->getType());
4518               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4519                                 << " in " << *RI << " :: " << *this << "\n");
4520               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4521                 Changed = ChangeStatus::CHANGED;
4522             }
4523             return true;
4524           };
4525       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4526     }
4527 
4528     return Changed | AAValueSimplify::manifest(A);
4529   }
4530 
4531   /// See AbstractAttribute::trackStatistics()
4532   void trackStatistics() const override {
4533     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4534   }
4535 };
4536 
4537 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4538   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4539       : AAValueSimplifyImpl(IRP, A) {}
4540 
4541   /// See AbstractAttribute::initialize(...).
4542   void initialize(Attributor &A) override {
4543     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4544     //        Needs investigation.
4545     // AAValueSimplifyImpl::initialize(A);
4546     Value &V = getAnchorValue();
4547 
4548     // TODO: add other stuffs
4549     if (isa<Constant>(V))
4550       indicatePessimisticFixpoint();
4551   }
4552 
4553   /// See AbstractAttribute::updateImpl(...).
4554   ChangeStatus updateImpl(Attributor &A) override {
4555     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4556 
4557     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4558                             bool Stripped) -> bool {
4559       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4560       if (!Stripped && this == &AA) {
4561         // TODO: Look the instruction and check recursively.
4562 
4563         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4564                           << "\n");
4565         return false;
4566       }
4567       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4568     };
4569 
4570     bool Dummy = false;
4571     if (!genericValueTraversal<AAValueSimplify, bool>(
4572             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4573             /* UseValueSimplify */ false))
4574       if (!askSimplifiedValueForAAValueConstantRange(A))
4575         return indicatePessimisticFixpoint();
4576 
4577     // If a candicate was found in this update, return CHANGED.
4578 
4579     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4580                ? ChangeStatus::UNCHANGED
4581                : ChangeStatus ::CHANGED;
4582   }
4583 
4584   /// See AbstractAttribute::trackStatistics()
4585   void trackStatistics() const override {
4586     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4587   }
4588 };
4589 
4590 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4591   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4592       : AAValueSimplifyImpl(IRP, A) {}
4593 
4594   /// See AbstractAttribute::initialize(...).
4595   void initialize(Attributor &A) override {
4596     SimplifiedAssociatedValue = &getAnchorValue();
4597     indicateOptimisticFixpoint();
4598   }
4599   /// See AbstractAttribute::initialize(...).
4600   ChangeStatus updateImpl(Attributor &A) override {
4601     llvm_unreachable(
4602         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4603   }
4604   /// See AbstractAttribute::trackStatistics()
4605   void trackStatistics() const override {
4606     STATS_DECLTRACK_FN_ATTR(value_simplify)
4607   }
4608 };
4609 
4610 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4611   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4612       : AAValueSimplifyFunction(IRP, A) {}
4613   /// See AbstractAttribute::trackStatistics()
4614   void trackStatistics() const override {
4615     STATS_DECLTRACK_CS_ATTR(value_simplify)
4616   }
4617 };
4618 
4619 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4620   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4621       : AAValueSimplifyReturned(IRP, A) {}
4622 
4623   /// See AbstractAttribute::manifest(...).
4624   ChangeStatus manifest(Attributor &A) override {
4625     return AAValueSimplifyImpl::manifest(A);
4626   }
4627 
4628   void trackStatistics() const override {
4629     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4630   }
4631 };
4632 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4633   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4634       : AAValueSimplifyFloating(IRP, A) {}
4635 
4636   void trackStatistics() const override {
4637     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4638   }
4639 };
4640 
4641 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4642 struct AAHeapToStackImpl : public AAHeapToStack {
4643   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4644       : AAHeapToStack(IRP, A) {}
4645 
4646   const std::string getAsStr() const override {
4647     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4648   }
4649 
4650   ChangeStatus manifest(Attributor &A) override {
4651     assert(getState().isValidState() &&
4652            "Attempted to manifest an invalid state!");
4653 
4654     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4655     Function *F = getAnchorScope();
4656     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4657 
4658     for (Instruction *MallocCall : MallocCalls) {
4659       // This malloc cannot be replaced.
4660       if (BadMallocCalls.count(MallocCall))
4661         continue;
4662 
4663       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4664         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4665         A.deleteAfterManifest(*FreeCall);
4666         HasChanged = ChangeStatus::CHANGED;
4667       }
4668 
4669       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4670                         << "\n");
4671 
4672       MaybeAlign Alignment;
4673       Constant *Size;
4674       if (isCallocLikeFn(MallocCall, TLI)) {
4675         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4676         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4677         APInt TotalSize = SizeT->getValue() * Num->getValue();
4678         Size =
4679             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4680       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4681         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4682         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4683                                    ->getValue()
4684                                    .getZExtValue());
4685       } else {
4686         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4687       }
4688 
4689       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4690       Instruction *AI =
4691           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4692                          "", MallocCall->getNextNode());
4693 
4694       if (AI->getType() != MallocCall->getType())
4695         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4696                              AI->getNextNode());
4697 
4698       A.changeValueAfterManifest(*MallocCall, *AI);
4699 
4700       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4701         auto *NBB = II->getNormalDest();
4702         BranchInst::Create(NBB, MallocCall->getParent());
4703         A.deleteAfterManifest(*MallocCall);
4704       } else {
4705         A.deleteAfterManifest(*MallocCall);
4706       }
4707 
4708       // Zero out the allocated memory if it was a calloc.
4709       if (isCallocLikeFn(MallocCall, TLI)) {
4710         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4711                                    AI->getNextNode());
4712         Value *Ops[] = {
4713             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4714             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4715 
4716         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4717         Module *M = F->getParent();
4718         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4719         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4720       }
4721       HasChanged = ChangeStatus::CHANGED;
4722     }
4723 
4724     return HasChanged;
4725   }
4726 
4727   /// Collection of all malloc calls in a function.
4728   SmallSetVector<Instruction *, 4> MallocCalls;
4729 
4730   /// Collection of malloc calls that cannot be converted.
4731   DenseSet<const Instruction *> BadMallocCalls;
4732 
4733   /// A map for each malloc call to the set of associated free calls.
4734   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4735 
4736   ChangeStatus updateImpl(Attributor &A) override;
4737 };
4738 
4739 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4740   const Function *F = getAnchorScope();
4741   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4742 
4743   MustBeExecutedContextExplorer &Explorer =
4744       A.getInfoCache().getMustBeExecutedContextExplorer();
4745 
4746   auto FreeCheck = [&](Instruction &I) {
4747     const auto &Frees = FreesForMalloc.lookup(&I);
4748     if (Frees.size() != 1)
4749       return false;
4750     Instruction *UniqueFree = *Frees.begin();
4751     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4752   };
4753 
4754   auto UsesCheck = [&](Instruction &I) {
4755     bool ValidUsesOnly = true;
4756     bool MustUse = true;
4757     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4758       Instruction *UserI = cast<Instruction>(U.getUser());
4759       if (isa<LoadInst>(UserI))
4760         return true;
4761       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4762         if (SI->getValueOperand() == U.get()) {
4763           LLVM_DEBUG(dbgs()
4764                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4765           ValidUsesOnly = false;
4766         } else {
4767           // A store into the malloc'ed memory is fine.
4768         }
4769         return true;
4770       }
4771       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4772         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4773           return true;
4774         // Record malloc.
4775         if (isFreeCall(UserI, TLI)) {
4776           if (MustUse) {
4777             FreesForMalloc[&I].insert(UserI);
4778           } else {
4779             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4780                               << *UserI << "\n");
4781             ValidUsesOnly = false;
4782           }
4783           return true;
4784         }
4785 
4786         unsigned ArgNo = CB->getArgOperandNo(&U);
4787 
4788         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4789             *this, IRPosition::callsite_argument(*CB, ArgNo));
4790 
4791         // If a callsite argument use is nofree, we are fine.
4792         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4793             *this, IRPosition::callsite_argument(*CB, ArgNo));
4794 
4795         if (!NoCaptureAA.isAssumedNoCapture() ||
4796             !ArgNoFreeAA.isAssumedNoFree()) {
4797           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4798           ValidUsesOnly = false;
4799         }
4800         return true;
4801       }
4802 
4803       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4804           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4805         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4806         Follow = true;
4807         return true;
4808       }
4809       // Unknown user for which we can not track uses further (in a way that
4810       // makes sense).
4811       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4812       ValidUsesOnly = false;
4813       return true;
4814     };
4815     A.checkForAllUses(Pred, *this, I);
4816     return ValidUsesOnly;
4817   };
4818 
4819   auto MallocCallocCheck = [&](Instruction &I) {
4820     if (BadMallocCalls.count(&I))
4821       return true;
4822 
4823     bool IsMalloc = isMallocLikeFn(&I, TLI);
4824     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
4825     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4826     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
4827       BadMallocCalls.insert(&I);
4828       return true;
4829     }
4830 
4831     if (IsMalloc) {
4832       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
4833         if (Size->getValue().ule(MaxHeapToStackSize))
4834           if (UsesCheck(I) || FreeCheck(I)) {
4835             MallocCalls.insert(&I);
4836             return true;
4837           }
4838     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
4839       // Only if the alignment and sizes are constant.
4840       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4841         if (Size->getValue().ule(MaxHeapToStackSize))
4842           if (UsesCheck(I) || FreeCheck(I)) {
4843             MallocCalls.insert(&I);
4844             return true;
4845           }
4846     } else if (IsCalloc) {
4847       bool Overflow = false;
4848       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
4849         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
4850           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
4851                   .ule(MaxHeapToStackSize))
4852             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
4853               MallocCalls.insert(&I);
4854               return true;
4855             }
4856     }
4857 
4858     BadMallocCalls.insert(&I);
4859     return true;
4860   };
4861 
4862   size_t NumBadMallocs = BadMallocCalls.size();
4863 
4864   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
4865 
4866   if (NumBadMallocs != BadMallocCalls.size())
4867     return ChangeStatus::CHANGED;
4868 
4869   return ChangeStatus::UNCHANGED;
4870 }
4871 
4872 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
4873   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
4874       : AAHeapToStackImpl(IRP, A) {}
4875 
4876   /// See AbstractAttribute::trackStatistics().
4877   void trackStatistics() const override {
4878     STATS_DECL(
4879         MallocCalls, Function,
4880         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
4881     for (auto *C : MallocCalls)
4882       if (!BadMallocCalls.count(C))
4883         ++BUILD_STAT_NAME(MallocCalls, Function);
4884   }
4885 };
4886 
4887 /// ----------------------- Privatizable Pointers ------------------------------
4888 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
4889   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
4890       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
4891 
4892   ChangeStatus indicatePessimisticFixpoint() override {
4893     AAPrivatizablePtr::indicatePessimisticFixpoint();
4894     PrivatizableType = nullptr;
4895     return ChangeStatus::CHANGED;
4896   }
4897 
4898   /// Identify the type we can chose for a private copy of the underlying
4899   /// argument. None means it is not clear yet, nullptr means there is none.
4900   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
4901 
4902   /// Return a privatizable type that encloses both T0 and T1.
4903   /// TODO: This is merely a stub for now as we should manage a mapping as well.
4904   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
4905     if (!T0.hasValue())
4906       return T1;
4907     if (!T1.hasValue())
4908       return T0;
4909     if (T0 == T1)
4910       return T0;
4911     return nullptr;
4912   }
4913 
4914   Optional<Type *> getPrivatizableType() const override {
4915     return PrivatizableType;
4916   }
4917 
4918   const std::string getAsStr() const override {
4919     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
4920   }
4921 
4922 protected:
4923   Optional<Type *> PrivatizableType;
4924 };
4925 
4926 // TODO: Do this for call site arguments (probably also other values) as well.
4927 
4928 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
4929   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
4930       : AAPrivatizablePtrImpl(IRP, A) {}
4931 
4932   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
4933   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
4934     // If this is a byval argument and we know all the call sites (so we can
4935     // rewrite them), there is no need to check them explicitly.
4936     bool AllCallSitesKnown;
4937     if (getIRPosition().hasAttr(Attribute::ByVal) &&
4938         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
4939                                true, AllCallSitesKnown))
4940       return getAssociatedValue().getType()->getPointerElementType();
4941 
4942     Optional<Type *> Ty;
4943     unsigned ArgNo = getIRPosition().getArgNo();
4944 
4945     // Make sure the associated call site argument has the same type at all call
4946     // sites and it is an allocation we know is safe to privatize, for now that
4947     // means we only allow alloca instructions.
4948     // TODO: We can additionally analyze the accesses in the callee to  create
4949     //       the type from that information instead. That is a little more
4950     //       involved and will be done in a follow up patch.
4951     auto CallSiteCheck = [&](AbstractCallSite ACS) {
4952       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
4953       // Check if a coresponding argument was found or if it is one not
4954       // associated (which can happen for callback calls).
4955       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4956         return false;
4957 
4958       // Check that all call sites agree on a type.
4959       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
4960       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
4961 
4962       LLVM_DEBUG({
4963         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
4964         if (CSTy.hasValue() && CSTy.getValue())
4965           CSTy.getValue()->print(dbgs());
4966         else if (CSTy.hasValue())
4967           dbgs() << "<nullptr>";
4968         else
4969           dbgs() << "<none>";
4970       });
4971 
4972       Ty = combineTypes(Ty, CSTy);
4973 
4974       LLVM_DEBUG({
4975         dbgs() << " : New Type: ";
4976         if (Ty.hasValue() && Ty.getValue())
4977           Ty.getValue()->print(dbgs());
4978         else if (Ty.hasValue())
4979           dbgs() << "<nullptr>";
4980         else
4981           dbgs() << "<none>";
4982         dbgs() << "\n";
4983       });
4984 
4985       return !Ty.hasValue() || Ty.getValue();
4986     };
4987 
4988     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
4989       return nullptr;
4990     return Ty;
4991   }
4992 
4993   /// See AbstractAttribute::updateImpl(...).
4994   ChangeStatus updateImpl(Attributor &A) override {
4995     PrivatizableType = identifyPrivatizableType(A);
4996     if (!PrivatizableType.hasValue())
4997       return ChangeStatus::UNCHANGED;
4998     if (!PrivatizableType.getValue())
4999       return indicatePessimisticFixpoint();
5000 
5001     // Avoid arguments with padding for now.
5002     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5003         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5004                                                 A.getInfoCache().getDL())) {
5005       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5006       return indicatePessimisticFixpoint();
5007     }
5008 
5009     // Verify callee and caller agree on how the promoted argument would be
5010     // passed.
5011     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5012     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5013     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5014     Function &Fn = *getIRPosition().getAnchorScope();
5015     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5016     ArgsToPromote.insert(getAssociatedArgument());
5017     const auto *TTI =
5018         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5019     if (!TTI ||
5020         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5021             Fn, *TTI, ArgsToPromote, Dummy) ||
5022         ArgsToPromote.empty()) {
5023       LLVM_DEBUG(
5024           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5025                  << Fn.getName() << "\n");
5026       return indicatePessimisticFixpoint();
5027     }
5028 
5029     // Collect the types that will replace the privatizable type in the function
5030     // signature.
5031     SmallVector<Type *, 16> ReplacementTypes;
5032     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5033 
5034     // Register a rewrite of the argument.
5035     Argument *Arg = getAssociatedArgument();
5036     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5037       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5038       return indicatePessimisticFixpoint();
5039     }
5040 
5041     unsigned ArgNo = Arg->getArgNo();
5042 
5043     // Helper to check if for the given call site the associated argument is
5044     // passed to a callback where the privatization would be different.
5045     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5046       SmallVector<const Use *, 4> CallbackUses;
5047       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5048       for (const Use *U : CallbackUses) {
5049         AbstractCallSite CBACS(U);
5050         assert(CBACS && CBACS.isCallbackCall());
5051         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5052           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5053 
5054           LLVM_DEBUG({
5055             dbgs()
5056                 << "[AAPrivatizablePtr] Argument " << *Arg
5057                 << "check if can be privatized in the context of its parent ("
5058                 << Arg->getParent()->getName()
5059                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5060                    "callback ("
5061                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5062                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5063                 << CBACS.getCallArgOperand(CBArg) << " vs "
5064                 << CB.getArgOperand(ArgNo) << "\n"
5065                 << "[AAPrivatizablePtr] " << CBArg << " : "
5066                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5067           });
5068 
5069           if (CBArgNo != int(ArgNo))
5070             continue;
5071           const auto &CBArgPrivAA =
5072               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5073           if (CBArgPrivAA.isValidState()) {
5074             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5075             if (!CBArgPrivTy.hasValue())
5076               continue;
5077             if (CBArgPrivTy.getValue() == PrivatizableType)
5078               continue;
5079           }
5080 
5081           LLVM_DEBUG({
5082             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5083                    << " cannot be privatized in the context of its parent ("
5084                    << Arg->getParent()->getName()
5085                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5086                       "callback ("
5087                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5088                    << ").\n[AAPrivatizablePtr] for which the argument "
5089                       "privatization is not compatible.\n";
5090           });
5091           return false;
5092         }
5093       }
5094       return true;
5095     };
5096 
5097     // Helper to check if for the given call site the associated argument is
5098     // passed to a direct call where the privatization would be different.
5099     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5100       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5101       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5102       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5103              "Expected a direct call operand for callback call operand");
5104 
5105       LLVM_DEBUG({
5106         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5107                << " check if be privatized in the context of its parent ("
5108                << Arg->getParent()->getName()
5109                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5110                   "direct call of ("
5111                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5112                << ").\n";
5113       });
5114 
5115       Function *DCCallee = DC->getCalledFunction();
5116       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5117         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5118             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5119         if (DCArgPrivAA.isValidState()) {
5120           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5121           if (!DCArgPrivTy.hasValue())
5122             return true;
5123           if (DCArgPrivTy.getValue() == PrivatizableType)
5124             return true;
5125         }
5126       }
5127 
5128       LLVM_DEBUG({
5129         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5130                << " cannot be privatized in the context of its parent ("
5131                << Arg->getParent()->getName()
5132                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5133                   "direct call of ("
5134                << ACS.getInstruction()->getCalledFunction()->getName()
5135                << ").\n[AAPrivatizablePtr] for which the argument "
5136                   "privatization is not compatible.\n";
5137       });
5138       return false;
5139     };
5140 
5141     // Helper to check if the associated argument is used at the given abstract
5142     // call site in a way that is incompatible with the privatization assumed
5143     // here.
5144     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5145       if (ACS.isDirectCall())
5146         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5147       if (ACS.isCallbackCall())
5148         return IsCompatiblePrivArgOfDirectCS(ACS);
5149       return false;
5150     };
5151 
5152     bool AllCallSitesKnown;
5153     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5154                                 AllCallSitesKnown))
5155       return indicatePessimisticFixpoint();
5156 
5157     return ChangeStatus::UNCHANGED;
5158   }
5159 
5160   /// Given a type to private \p PrivType, collect the constituates (which are
5161   /// used) in \p ReplacementTypes.
5162   static void
5163   identifyReplacementTypes(Type *PrivType,
5164                            SmallVectorImpl<Type *> &ReplacementTypes) {
5165     // TODO: For now we expand the privatization type to the fullest which can
5166     //       lead to dead arguments that need to be removed later.
5167     assert(PrivType && "Expected privatizable type!");
5168 
5169     // Traverse the type, extract constituate types on the outermost level.
5170     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5171       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5172         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5173     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5174       ReplacementTypes.append(PrivArrayType->getNumElements(),
5175                               PrivArrayType->getElementType());
5176     } else {
5177       ReplacementTypes.push_back(PrivType);
5178     }
5179   }
5180 
5181   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5182   /// The values needed are taken from the arguments of \p F starting at
5183   /// position \p ArgNo.
5184   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5185                                    unsigned ArgNo, Instruction &IP) {
5186     assert(PrivType && "Expected privatizable type!");
5187 
5188     IRBuilder<NoFolder> IRB(&IP);
5189     const DataLayout &DL = F.getParent()->getDataLayout();
5190 
5191     // Traverse the type, build GEPs and stores.
5192     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5193       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5194       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5195         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5196         Value *Ptr = constructPointer(
5197             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5198         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5199       }
5200     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5201       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5202       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5203       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5204         Value *Ptr =
5205             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5206         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5207       }
5208     } else {
5209       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5210     }
5211   }
5212 
5213   /// Extract values from \p Base according to the type \p PrivType at the
5214   /// call position \p ACS. The values are appended to \p ReplacementValues.
5215   void createReplacementValues(Type *PrivType, AbstractCallSite ACS,
5216                                Value *Base,
5217                                SmallVectorImpl<Value *> &ReplacementValues) {
5218     assert(Base && "Expected base value!");
5219     assert(PrivType && "Expected privatizable type!");
5220     Instruction *IP = ACS.getInstruction();
5221 
5222     IRBuilder<NoFolder> IRB(IP);
5223     const DataLayout &DL = IP->getModule()->getDataLayout();
5224 
5225     if (Base->getType()->getPointerElementType() != PrivType)
5226       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5227                                                  "", ACS.getInstruction());
5228 
5229     // TODO: Improve the alignment of the loads.
5230     // Traverse the type, build GEPs and loads.
5231     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5232       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5233       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5234         Type *PointeeTy = PrivStructType->getElementType(u);
5235         Value *Ptr =
5236             constructPointer(PointeeTy->getPointerTo(), Base,
5237                              PrivStructLayout->getElementOffset(u), IRB, DL);
5238         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5239         L->setAlignment(Align(1));
5240         ReplacementValues.push_back(L);
5241       }
5242     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5243       Type *PointeeTy = PrivArrayType->getElementType();
5244       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5245       Type *PointeePtrTy = PointeeTy->getPointerTo();
5246       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5247         Value *Ptr =
5248             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5249         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5250         L->setAlignment(Align(1));
5251         ReplacementValues.push_back(L);
5252       }
5253     } else {
5254       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5255       L->setAlignment(Align(1));
5256       ReplacementValues.push_back(L);
5257     }
5258   }
5259 
5260   /// See AbstractAttribute::manifest(...)
5261   ChangeStatus manifest(Attributor &A) override {
5262     if (!PrivatizableType.hasValue())
5263       return ChangeStatus::UNCHANGED;
5264     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5265 
5266     // Collect all tail calls in the function as we cannot allow new allocas to
5267     // escape into tail recursion.
5268     // TODO: Be smarter about new allocas escaping into tail calls.
5269     SmallVector<CallInst *, 16> TailCalls;
5270     if (!A.checkForAllInstructions(
5271             [&](Instruction &I) {
5272               CallInst &CI = cast<CallInst>(I);
5273               if (CI.isTailCall())
5274                 TailCalls.push_back(&CI);
5275               return true;
5276             },
5277             *this, {Instruction::Call}))
5278       return ChangeStatus::UNCHANGED;
5279 
5280     Argument *Arg = getAssociatedArgument();
5281 
5282     // Callback to repair the associated function. A new alloca is placed at the
5283     // beginning and initialized with the values passed through arguments. The
5284     // new alloca replaces the use of the old pointer argument.
5285     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5286         [=](const Attributor::ArgumentReplacementInfo &ARI,
5287             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5288           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5289           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5290           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5291                                     Arg->getName() + ".priv", IP);
5292           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5293                                ArgIt->getArgNo(), *IP);
5294           Arg->replaceAllUsesWith(AI);
5295 
5296           for (CallInst *CI : TailCalls)
5297             CI->setTailCall(false);
5298         };
5299 
5300     // Callback to repair a call site of the associated function. The elements
5301     // of the privatizable type are loaded prior to the call and passed to the
5302     // new function version.
5303     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5304         [=](const Attributor::ArgumentReplacementInfo &ARI,
5305             AbstractCallSite ACS, SmallVectorImpl<Value *> &NewArgOperands) {
5306           createReplacementValues(
5307               PrivatizableType.getValue(), ACS,
5308               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5309               NewArgOperands);
5310         };
5311 
5312     // Collect the types that will replace the privatizable type in the function
5313     // signature.
5314     SmallVector<Type *, 16> ReplacementTypes;
5315     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5316 
5317     // Register a rewrite of the argument.
5318     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5319                                            std::move(FnRepairCB),
5320                                            std::move(ACSRepairCB)))
5321       return ChangeStatus::CHANGED;
5322     return ChangeStatus::UNCHANGED;
5323   }
5324 
5325   /// See AbstractAttribute::trackStatistics()
5326   void trackStatistics() const override {
5327     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5328   }
5329 };
5330 
5331 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5332   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5333       : AAPrivatizablePtrImpl(IRP, A) {}
5334 
5335   /// See AbstractAttribute::initialize(...).
5336   virtual void initialize(Attributor &A) override {
5337     // TODO: We can privatize more than arguments.
5338     indicatePessimisticFixpoint();
5339   }
5340 
5341   ChangeStatus updateImpl(Attributor &A) override {
5342     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5343                      "updateImpl will not be called");
5344   }
5345 
5346   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5347   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5348     Value *Obj =
5349         GetUnderlyingObject(&getAssociatedValue(), A.getInfoCache().getDL());
5350     if (!Obj) {
5351       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5352       return nullptr;
5353     }
5354 
5355     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5356       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5357         if (CI->isOne())
5358           return Obj->getType()->getPointerElementType();
5359     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5360       auto &PrivArgAA =
5361           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5362       if (PrivArgAA.isAssumedPrivatizablePtr())
5363         return Obj->getType()->getPointerElementType();
5364     }
5365 
5366     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5367                          "alloca nor privatizable argument: "
5368                       << *Obj << "!\n");
5369     return nullptr;
5370   }
5371 
5372   /// See AbstractAttribute::trackStatistics()
5373   void trackStatistics() const override {
5374     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5375   }
5376 };
5377 
5378 struct AAPrivatizablePtrCallSiteArgument final
5379     : public AAPrivatizablePtrFloating {
5380   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5381       : AAPrivatizablePtrFloating(IRP, A) {}
5382 
5383   /// See AbstractAttribute::initialize(...).
5384   void initialize(Attributor &A) override {
5385     if (getIRPosition().hasAttr(Attribute::ByVal))
5386       indicateOptimisticFixpoint();
5387   }
5388 
5389   /// See AbstractAttribute::updateImpl(...).
5390   ChangeStatus updateImpl(Attributor &A) override {
5391     PrivatizableType = identifyPrivatizableType(A);
5392     if (!PrivatizableType.hasValue())
5393       return ChangeStatus::UNCHANGED;
5394     if (!PrivatizableType.getValue())
5395       return indicatePessimisticFixpoint();
5396 
5397     const IRPosition &IRP = getIRPosition();
5398     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5399     if (!NoCaptureAA.isAssumedNoCapture()) {
5400       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5401       return indicatePessimisticFixpoint();
5402     }
5403 
5404     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5405     if (!NoAliasAA.isAssumedNoAlias()) {
5406       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5407       return indicatePessimisticFixpoint();
5408     }
5409 
5410     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5411     if (!MemBehaviorAA.isAssumedReadOnly()) {
5412       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5413       return indicatePessimisticFixpoint();
5414     }
5415 
5416     return ChangeStatus::UNCHANGED;
5417   }
5418 
5419   /// See AbstractAttribute::trackStatistics()
5420   void trackStatistics() const override {
5421     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5422   }
5423 };
5424 
5425 struct AAPrivatizablePtrCallSiteReturned final
5426     : public AAPrivatizablePtrFloating {
5427   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5428       : AAPrivatizablePtrFloating(IRP, A) {}
5429 
5430   /// See AbstractAttribute::initialize(...).
5431   void initialize(Attributor &A) override {
5432     // TODO: We can privatize more than arguments.
5433     indicatePessimisticFixpoint();
5434   }
5435 
5436   /// See AbstractAttribute::trackStatistics()
5437   void trackStatistics() const override {
5438     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5439   }
5440 };
5441 
5442 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5443   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5444       : AAPrivatizablePtrFloating(IRP, A) {}
5445 
5446   /// See AbstractAttribute::initialize(...).
5447   void initialize(Attributor &A) override {
5448     // TODO: We can privatize more than arguments.
5449     indicatePessimisticFixpoint();
5450   }
5451 
5452   /// See AbstractAttribute::trackStatistics()
5453   void trackStatistics() const override {
5454     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5455   }
5456 };
5457 
5458 /// -------------------- Memory Behavior Attributes ----------------------------
5459 /// Includes read-none, read-only, and write-only.
5460 /// ----------------------------------------------------------------------------
5461 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5462   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5463       : AAMemoryBehavior(IRP, A) {}
5464 
5465   /// See AbstractAttribute::initialize(...).
5466   void initialize(Attributor &A) override {
5467     intersectAssumedBits(BEST_STATE);
5468     getKnownStateFromValue(getIRPosition(), getState());
5469     IRAttribute::initialize(A);
5470   }
5471 
5472   /// Return the memory behavior information encoded in the IR for \p IRP.
5473   static void getKnownStateFromValue(const IRPosition &IRP,
5474                                      BitIntegerState &State,
5475                                      bool IgnoreSubsumingPositions = false) {
5476     SmallVector<Attribute, 2> Attrs;
5477     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5478     for (const Attribute &Attr : Attrs) {
5479       switch (Attr.getKindAsEnum()) {
5480       case Attribute::ReadNone:
5481         State.addKnownBits(NO_ACCESSES);
5482         break;
5483       case Attribute::ReadOnly:
5484         State.addKnownBits(NO_WRITES);
5485         break;
5486       case Attribute::WriteOnly:
5487         State.addKnownBits(NO_READS);
5488         break;
5489       default:
5490         llvm_unreachable("Unexpected attribute!");
5491       }
5492     }
5493 
5494     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5495       if (!I->mayReadFromMemory())
5496         State.addKnownBits(NO_READS);
5497       if (!I->mayWriteToMemory())
5498         State.addKnownBits(NO_WRITES);
5499     }
5500   }
5501 
5502   /// See AbstractAttribute::getDeducedAttributes(...).
5503   void getDeducedAttributes(LLVMContext &Ctx,
5504                             SmallVectorImpl<Attribute> &Attrs) const override {
5505     assert(Attrs.size() == 0);
5506     if (isAssumedReadNone())
5507       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5508     else if (isAssumedReadOnly())
5509       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5510     else if (isAssumedWriteOnly())
5511       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5512     assert(Attrs.size() <= 1);
5513   }
5514 
5515   /// See AbstractAttribute::manifest(...).
5516   ChangeStatus manifest(Attributor &A) override {
5517     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5518       return ChangeStatus::UNCHANGED;
5519 
5520     const IRPosition &IRP = getIRPosition();
5521 
5522     // Check if we would improve the existing attributes first.
5523     SmallVector<Attribute, 4> DeducedAttrs;
5524     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5525     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5526           return IRP.hasAttr(Attr.getKindAsEnum(),
5527                              /* IgnoreSubsumingPositions */ true);
5528         }))
5529       return ChangeStatus::UNCHANGED;
5530 
5531     // Clear existing attributes.
5532     IRP.removeAttrs(AttrKinds);
5533 
5534     // Use the generic manifest method.
5535     return IRAttribute::manifest(A);
5536   }
5537 
5538   /// See AbstractState::getAsStr().
5539   const std::string getAsStr() const override {
5540     if (isAssumedReadNone())
5541       return "readnone";
5542     if (isAssumedReadOnly())
5543       return "readonly";
5544     if (isAssumedWriteOnly())
5545       return "writeonly";
5546     return "may-read/write";
5547   }
5548 
5549   /// The set of IR attributes AAMemoryBehavior deals with.
5550   static const Attribute::AttrKind AttrKinds[3];
5551 };
5552 
5553 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5554     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5555 
5556 /// Memory behavior attribute for a floating value.
5557 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5558   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5559       : AAMemoryBehaviorImpl(IRP, A) {}
5560 
5561   /// See AbstractAttribute::initialize(...).
5562   void initialize(Attributor &A) override {
5563     AAMemoryBehaviorImpl::initialize(A);
5564     // Initialize the use vector with all direct uses of the associated value.
5565     for (const Use &U : getAssociatedValue().uses())
5566       Uses.insert(&U);
5567   }
5568 
5569   /// See AbstractAttribute::updateImpl(...).
5570   ChangeStatus updateImpl(Attributor &A) override;
5571 
5572   /// See AbstractAttribute::trackStatistics()
5573   void trackStatistics() const override {
5574     if (isAssumedReadNone())
5575       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5576     else if (isAssumedReadOnly())
5577       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5578     else if (isAssumedWriteOnly())
5579       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5580   }
5581 
5582 private:
5583   /// Return true if users of \p UserI might access the underlying
5584   /// variable/location described by \p U and should therefore be analyzed.
5585   bool followUsersOfUseIn(Attributor &A, const Use *U,
5586                           const Instruction *UserI);
5587 
5588   /// Update the state according to the effect of use \p U in \p UserI.
5589   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5590 
5591 protected:
5592   /// Container for (transitive) uses of the associated argument.
5593   SetVector<const Use *> Uses;
5594 };
5595 
5596 /// Memory behavior attribute for function argument.
5597 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5598   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5599       : AAMemoryBehaviorFloating(IRP, A) {}
5600 
5601   /// See AbstractAttribute::initialize(...).
5602   void initialize(Attributor &A) override {
5603     intersectAssumedBits(BEST_STATE);
5604     const IRPosition &IRP = getIRPosition();
5605     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5606     // can query it when we use has/getAttr. That would allow us to reuse the
5607     // initialize of the base class here.
5608     bool HasByVal =
5609         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5610     getKnownStateFromValue(IRP, getState(),
5611                            /* IgnoreSubsumingPositions */ HasByVal);
5612 
5613     // Initialize the use vector with all direct uses of the associated value.
5614     Argument *Arg = getAssociatedArgument();
5615     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5616       indicatePessimisticFixpoint();
5617     } else {
5618       // Initialize the use vector with all direct uses of the associated value.
5619       for (const Use &U : Arg->uses())
5620         Uses.insert(&U);
5621     }
5622   }
5623 
5624   ChangeStatus manifest(Attributor &A) override {
5625     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5626     if (!getAssociatedValue().getType()->isPointerTy())
5627       return ChangeStatus::UNCHANGED;
5628 
5629     // TODO: From readattrs.ll: "inalloca parameters are always
5630     //                           considered written"
5631     if (hasAttr({Attribute::InAlloca})) {
5632       removeKnownBits(NO_WRITES);
5633       removeAssumedBits(NO_WRITES);
5634     }
5635     return AAMemoryBehaviorFloating::manifest(A);
5636   }
5637 
5638   /// See AbstractAttribute::trackStatistics()
5639   void trackStatistics() const override {
5640     if (isAssumedReadNone())
5641       STATS_DECLTRACK_ARG_ATTR(readnone)
5642     else if (isAssumedReadOnly())
5643       STATS_DECLTRACK_ARG_ATTR(readonly)
5644     else if (isAssumedWriteOnly())
5645       STATS_DECLTRACK_ARG_ATTR(writeonly)
5646   }
5647 };
5648 
5649 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5650   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5651       : AAMemoryBehaviorArgument(IRP, A) {}
5652 
5653   /// See AbstractAttribute::initialize(...).
5654   void initialize(Attributor &A) override {
5655     if (Argument *Arg = getAssociatedArgument()) {
5656       if (Arg->hasByValAttr()) {
5657         addKnownBits(NO_WRITES);
5658         removeKnownBits(NO_READS);
5659         removeAssumedBits(NO_READS);
5660       }
5661     }
5662     AAMemoryBehaviorArgument::initialize(A);
5663   }
5664 
5665   /// See AbstractAttribute::updateImpl(...).
5666   ChangeStatus updateImpl(Attributor &A) override {
5667     // TODO: Once we have call site specific value information we can provide
5668     //       call site specific liveness liveness information and then it makes
5669     //       sense to specialize attributes for call sites arguments instead of
5670     //       redirecting requests to the callee argument.
5671     Argument *Arg = getAssociatedArgument();
5672     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5673     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5674     return clampStateAndIndicateChange(
5675         getState(),
5676         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5677   }
5678 
5679   /// See AbstractAttribute::trackStatistics()
5680   void trackStatistics() const override {
5681     if (isAssumedReadNone())
5682       STATS_DECLTRACK_CSARG_ATTR(readnone)
5683     else if (isAssumedReadOnly())
5684       STATS_DECLTRACK_CSARG_ATTR(readonly)
5685     else if (isAssumedWriteOnly())
5686       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5687   }
5688 };
5689 
5690 /// Memory behavior attribute for a call site return position.
5691 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5692   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
5693       : AAMemoryBehaviorFloating(IRP, A) {}
5694 
5695   /// See AbstractAttribute::manifest(...).
5696   ChangeStatus manifest(Attributor &A) override {
5697     // We do not annotate returned values.
5698     return ChangeStatus::UNCHANGED;
5699   }
5700 
5701   /// See AbstractAttribute::trackStatistics()
5702   void trackStatistics() const override {}
5703 };
5704 
5705 /// An AA to represent the memory behavior function attributes.
5706 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5707   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
5708       : AAMemoryBehaviorImpl(IRP, A) {}
5709 
5710   /// See AbstractAttribute::updateImpl(Attributor &A).
5711   virtual ChangeStatus updateImpl(Attributor &A) override;
5712 
5713   /// See AbstractAttribute::manifest(...).
5714   ChangeStatus manifest(Attributor &A) override {
5715     Function &F = cast<Function>(getAnchorValue());
5716     if (isAssumedReadNone()) {
5717       F.removeFnAttr(Attribute::ArgMemOnly);
5718       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5719       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5720     }
5721     return AAMemoryBehaviorImpl::manifest(A);
5722   }
5723 
5724   /// See AbstractAttribute::trackStatistics()
5725   void trackStatistics() const override {
5726     if (isAssumedReadNone())
5727       STATS_DECLTRACK_FN_ATTR(readnone)
5728     else if (isAssumedReadOnly())
5729       STATS_DECLTRACK_FN_ATTR(readonly)
5730     else if (isAssumedWriteOnly())
5731       STATS_DECLTRACK_FN_ATTR(writeonly)
5732   }
5733 };
5734 
5735 /// AAMemoryBehavior attribute for call sites.
5736 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5737   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
5738       : AAMemoryBehaviorImpl(IRP, A) {}
5739 
5740   /// See AbstractAttribute::initialize(...).
5741   void initialize(Attributor &A) override {
5742     AAMemoryBehaviorImpl::initialize(A);
5743     Function *F = getAssociatedFunction();
5744     if (!F || !A.isFunctionIPOAmendable(*F)) {
5745       indicatePessimisticFixpoint();
5746       return;
5747     }
5748   }
5749 
5750   /// See AbstractAttribute::updateImpl(...).
5751   ChangeStatus updateImpl(Attributor &A) override {
5752     // TODO: Once we have call site specific value information we can provide
5753     //       call site specific liveness liveness information and then it makes
5754     //       sense to specialize attributes for call sites arguments instead of
5755     //       redirecting requests to the callee argument.
5756     Function *F = getAssociatedFunction();
5757     const IRPosition &FnPos = IRPosition::function(*F);
5758     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5759     return clampStateAndIndicateChange(
5760         getState(),
5761         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5762   }
5763 
5764   /// See AbstractAttribute::trackStatistics()
5765   void trackStatistics() const override {
5766     if (isAssumedReadNone())
5767       STATS_DECLTRACK_CS_ATTR(readnone)
5768     else if (isAssumedReadOnly())
5769       STATS_DECLTRACK_CS_ATTR(readonly)
5770     else if (isAssumedWriteOnly())
5771       STATS_DECLTRACK_CS_ATTR(writeonly)
5772   }
5773 };
5774 
5775 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5776 
5777   // The current assumed state used to determine a change.
5778   auto AssumedState = getAssumed();
5779 
5780   auto CheckRWInst = [&](Instruction &I) {
5781     // If the instruction has an own memory behavior state, use it to restrict
5782     // the local state. No further analysis is required as the other memory
5783     // state is as optimistic as it gets.
5784     if (const auto *CB = dyn_cast<CallBase>(&I)) {
5785       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5786           *this, IRPosition::callsite_function(*CB));
5787       intersectAssumedBits(MemBehaviorAA.getAssumed());
5788       return !isAtFixpoint();
5789     }
5790 
5791     // Remove access kind modifiers if necessary.
5792     if (I.mayReadFromMemory())
5793       removeAssumedBits(NO_READS);
5794     if (I.mayWriteToMemory())
5795       removeAssumedBits(NO_WRITES);
5796     return !isAtFixpoint();
5797   };
5798 
5799   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5800     return indicatePessimisticFixpoint();
5801 
5802   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5803                                         : ChangeStatus::UNCHANGED;
5804 }
5805 
5806 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5807 
5808   const IRPosition &IRP = getIRPosition();
5809   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5810   AAMemoryBehavior::StateType &S = getState();
5811 
5812   // First, check the function scope. We take the known information and we avoid
5813   // work if the assumed information implies the current assumed information for
5814   // this attribute. This is a valid for all but byval arguments.
5815   Argument *Arg = IRP.getAssociatedArgument();
5816   AAMemoryBehavior::base_t FnMemAssumedState =
5817       AAMemoryBehavior::StateType::getWorstState();
5818   if (!Arg || !Arg->hasByValAttr()) {
5819     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
5820         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5821     FnMemAssumedState = FnMemAA.getAssumed();
5822     S.addKnownBits(FnMemAA.getKnown());
5823     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
5824       return ChangeStatus::UNCHANGED;
5825   }
5826 
5827   // Make sure the value is not captured (except through "return"), if
5828   // it is, any information derived would be irrelevant anyway as we cannot
5829   // check the potential aliases introduced by the capture. However, no need
5830   // to fall back to anythign less optimistic than the function state.
5831   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5832       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
5833   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
5834     S.intersectAssumedBits(FnMemAssumedState);
5835     return ChangeStatus::CHANGED;
5836   }
5837 
5838   // The current assumed state used to determine a change.
5839   auto AssumedState = S.getAssumed();
5840 
5841   // Liveness information to exclude dead users.
5842   // TODO: Take the FnPos once we have call site specific liveness information.
5843   const auto &LivenessAA = A.getAAFor<AAIsDead>(
5844       *this, IRPosition::function(*IRP.getAssociatedFunction()),
5845       /* TrackDependence */ false);
5846 
5847   // Visit and expand uses until all are analyzed or a fixpoint is reached.
5848   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
5849     const Use *U = Uses[i];
5850     Instruction *UserI = cast<Instruction>(U->getUser());
5851     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
5852                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
5853                       << "]\n");
5854     if (A.isAssumedDead(*U, this, &LivenessAA))
5855       continue;
5856 
5857     // Droppable users, e.g., llvm::assume does not actually perform any action.
5858     if (UserI->isDroppable())
5859       continue;
5860 
5861     // Check if the users of UserI should also be visited.
5862     if (followUsersOfUseIn(A, U, UserI))
5863       for (const Use &UserIUse : UserI->uses())
5864         Uses.insert(&UserIUse);
5865 
5866     // If UserI might touch memory we analyze the use in detail.
5867     if (UserI->mayReadOrWriteMemory())
5868       analyzeUseIn(A, U, UserI);
5869   }
5870 
5871   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5872                                         : ChangeStatus::UNCHANGED;
5873 }
5874 
5875 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
5876                                                   const Instruction *UserI) {
5877   // The loaded value is unrelated to the pointer argument, no need to
5878   // follow the users of the load.
5879   if (isa<LoadInst>(UserI))
5880     return false;
5881 
5882   // By default we follow all uses assuming UserI might leak information on U,
5883   // we have special handling for call sites operands though.
5884   const auto *CB = dyn_cast<CallBase>(UserI);
5885   if (!CB || !CB->isArgOperand(U))
5886     return true;
5887 
5888   // If the use is a call argument known not to be captured, the users of
5889   // the call do not need to be visited because they have to be unrelated to
5890   // the input. Note that this check is not trivial even though we disallow
5891   // general capturing of the underlying argument. The reason is that the
5892   // call might the argument "through return", which we allow and for which we
5893   // need to check call users.
5894   if (U->get()->getType()->isPointerTy()) {
5895     unsigned ArgNo = CB->getArgOperandNo(U);
5896     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
5897         *this, IRPosition::callsite_argument(*CB, ArgNo),
5898         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5899     return !ArgNoCaptureAA.isAssumedNoCapture();
5900   }
5901 
5902   return true;
5903 }
5904 
5905 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
5906                                             const Instruction *UserI) {
5907   assert(UserI->mayReadOrWriteMemory());
5908 
5909   switch (UserI->getOpcode()) {
5910   default:
5911     // TODO: Handle all atomics and other side-effect operations we know of.
5912     break;
5913   case Instruction::Load:
5914     // Loads cause the NO_READS property to disappear.
5915     removeAssumedBits(NO_READS);
5916     return;
5917 
5918   case Instruction::Store:
5919     // Stores cause the NO_WRITES property to disappear if the use is the
5920     // pointer operand. Note that we do assume that capturing was taken care of
5921     // somewhere else.
5922     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
5923       removeAssumedBits(NO_WRITES);
5924     return;
5925 
5926   case Instruction::Call:
5927   case Instruction::CallBr:
5928   case Instruction::Invoke: {
5929     // For call sites we look at the argument memory behavior attribute (this
5930     // could be recursive!) in order to restrict our own state.
5931     const auto *CB = cast<CallBase>(UserI);
5932 
5933     // Give up on operand bundles.
5934     if (CB->isBundleOperand(U)) {
5935       indicatePessimisticFixpoint();
5936       return;
5937     }
5938 
5939     // Calling a function does read the function pointer, maybe write it if the
5940     // function is self-modifying.
5941     if (CB->isCallee(U)) {
5942       removeAssumedBits(NO_READS);
5943       break;
5944     }
5945 
5946     // Adjust the possible access behavior based on the information on the
5947     // argument.
5948     IRPosition Pos;
5949     if (U->get()->getType()->isPointerTy())
5950       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
5951     else
5952       Pos = IRPosition::callsite_function(*CB);
5953     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5954         *this, Pos,
5955         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5956     // "assumed" has at most the same bits as the MemBehaviorAA assumed
5957     // and at least "known".
5958     intersectAssumedBits(MemBehaviorAA.getAssumed());
5959     return;
5960   }
5961   };
5962 
5963   // Generally, look at the "may-properties" and adjust the assumed state if we
5964   // did not trigger special handling before.
5965   if (UserI->mayReadFromMemory())
5966     removeAssumedBits(NO_READS);
5967   if (UserI->mayWriteToMemory())
5968     removeAssumedBits(NO_WRITES);
5969 }
5970 
5971 } // namespace
5972 
5973 /// -------------------- Memory Locations Attributes ---------------------------
5974 /// Includes read-none, argmemonly, inaccessiblememonly,
5975 /// inaccessiblememorargmemonly
5976 /// ----------------------------------------------------------------------------
5977 
5978 std::string AAMemoryLocation::getMemoryLocationsAsStr(
5979     AAMemoryLocation::MemoryLocationsKind MLK) {
5980   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
5981     return "all memory";
5982   if (MLK == AAMemoryLocation::NO_LOCATIONS)
5983     return "no memory";
5984   std::string S = "memory:";
5985   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
5986     S += "stack,";
5987   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
5988     S += "constant,";
5989   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
5990     S += "internal global,";
5991   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
5992     S += "external global,";
5993   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
5994     S += "argument,";
5995   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
5996     S += "inaccessible,";
5997   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
5998     S += "malloced,";
5999   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6000     S += "unknown,";
6001   S.pop_back();
6002   return S;
6003 }
6004 
6005 namespace {
6006 struct AAMemoryLocationImpl : public AAMemoryLocation {
6007 
6008   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6009       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6010     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6011       AccessKind2Accesses[u] = nullptr;
6012   }
6013 
6014   ~AAMemoryLocationImpl() {
6015     // The AccessSets are allocated via a BumpPtrAllocator, we call
6016     // the destructor manually.
6017     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6018       if (AccessKind2Accesses[u])
6019         AccessKind2Accesses[u]->~AccessSet();
6020   }
6021 
6022   /// See AbstractAttribute::initialize(...).
6023   void initialize(Attributor &A) override {
6024     intersectAssumedBits(BEST_STATE);
6025     getKnownStateFromValue(A, getIRPosition(), getState());
6026     IRAttribute::initialize(A);
6027   }
6028 
6029   /// Return the memory behavior information encoded in the IR for \p IRP.
6030   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6031                                      BitIntegerState &State,
6032                                      bool IgnoreSubsumingPositions = false) {
6033     // For internal functions we ignore `argmemonly` and
6034     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6035     // constant propagation. It is unclear if this is the best way but it is
6036     // unlikely this will cause real performance problems. If we are deriving
6037     // attributes for the anchor function we even remove the attribute in
6038     // addition to ignoring it.
6039     bool UseArgMemOnly = true;
6040     Function *AnchorFn = IRP.getAnchorScope();
6041     if (AnchorFn && A.isRunOn(*AnchorFn))
6042       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6043 
6044     SmallVector<Attribute, 2> Attrs;
6045     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6046     for (const Attribute &Attr : Attrs) {
6047       switch (Attr.getKindAsEnum()) {
6048       case Attribute::ReadNone:
6049         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6050         break;
6051       case Attribute::InaccessibleMemOnly:
6052         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6053         break;
6054       case Attribute::ArgMemOnly:
6055         if (UseArgMemOnly)
6056           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6057         else
6058           IRP.removeAttrs({Attribute::ArgMemOnly});
6059         break;
6060       case Attribute::InaccessibleMemOrArgMemOnly:
6061         if (UseArgMemOnly)
6062           State.addKnownBits(inverseLocation(
6063               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6064         else
6065           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6066         break;
6067       default:
6068         llvm_unreachable("Unexpected attribute!");
6069       }
6070     }
6071   }
6072 
6073   /// See AbstractAttribute::getDeducedAttributes(...).
6074   void getDeducedAttributes(LLVMContext &Ctx,
6075                             SmallVectorImpl<Attribute> &Attrs) const override {
6076     assert(Attrs.size() == 0);
6077     if (isAssumedReadNone()) {
6078       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6079     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6080       if (isAssumedInaccessibleMemOnly())
6081         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6082       else if (isAssumedArgMemOnly())
6083         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6084       else if (isAssumedInaccessibleOrArgMemOnly())
6085         Attrs.push_back(
6086             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6087     }
6088     assert(Attrs.size() <= 1);
6089   }
6090 
6091   /// See AbstractAttribute::manifest(...).
6092   ChangeStatus manifest(Attributor &A) override {
6093     const IRPosition &IRP = getIRPosition();
6094 
6095     // Check if we would improve the existing attributes first.
6096     SmallVector<Attribute, 4> DeducedAttrs;
6097     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6098     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6099           return IRP.hasAttr(Attr.getKindAsEnum(),
6100                              /* IgnoreSubsumingPositions */ true);
6101         }))
6102       return ChangeStatus::UNCHANGED;
6103 
6104     // Clear existing attributes.
6105     IRP.removeAttrs(AttrKinds);
6106     if (isAssumedReadNone())
6107       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6108 
6109     // Use the generic manifest method.
6110     return IRAttribute::manifest(A);
6111   }
6112 
6113   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6114   bool checkForAllAccessesToMemoryKind(
6115       function_ref<bool(const Instruction *, const Value *, AccessKind,
6116                         MemoryLocationsKind)>
6117           Pred,
6118       MemoryLocationsKind RequestedMLK) const override {
6119     if (!isValidState())
6120       return false;
6121 
6122     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6123     if (AssumedMLK == NO_LOCATIONS)
6124       return true;
6125 
6126     unsigned Idx = 0;
6127     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6128          CurMLK *= 2, ++Idx) {
6129       if (CurMLK & RequestedMLK)
6130         continue;
6131 
6132       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6133         for (const AccessInfo &AI : *Accesses)
6134           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6135             return false;
6136     }
6137 
6138     return true;
6139   }
6140 
6141   ChangeStatus indicatePessimisticFixpoint() override {
6142     // If we give up and indicate a pessimistic fixpoint this instruction will
6143     // become an access for all potential access kinds:
6144     // TODO: Add pointers for argmemonly and globals to improve the results of
6145     //       checkForAllAccessesToMemoryKind.
6146     bool Changed = false;
6147     MemoryLocationsKind KnownMLK = getKnown();
6148     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6149     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6150       if (!(CurMLK & KnownMLK))
6151         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6152                                   getAccessKindFromInst(I));
6153     return AAMemoryLocation::indicatePessimisticFixpoint();
6154   }
6155 
6156 protected:
6157   /// Helper struct to tie together an instruction that has a read or write
6158   /// effect with the pointer it accesses (if any).
6159   struct AccessInfo {
6160 
6161     /// The instruction that caused the access.
6162     const Instruction *I;
6163 
6164     /// The base pointer that is accessed, or null if unknown.
6165     const Value *Ptr;
6166 
6167     /// The kind of access (read/write/read+write).
6168     AccessKind Kind;
6169 
6170     bool operator==(const AccessInfo &RHS) const {
6171       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6172     }
6173     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6174       if (LHS.I != RHS.I)
6175         return LHS.I < RHS.I;
6176       if (LHS.Ptr != RHS.Ptr)
6177         return LHS.Ptr < RHS.Ptr;
6178       if (LHS.Kind != RHS.Kind)
6179         return LHS.Kind < RHS.Kind;
6180       return false;
6181     }
6182   };
6183 
6184   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6185   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6186   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6187   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6188 
6189   /// Return the kind(s) of location that may be accessed by \p V.
6190   AAMemoryLocation::MemoryLocationsKind
6191   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6192 
6193   /// Return the access kind as determined by \p I.
6194   AccessKind getAccessKindFromInst(const Instruction *I) {
6195     AccessKind AK = READ_WRITE;
6196     if (I) {
6197       AK = I->mayReadFromMemory() ? READ : NONE;
6198       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6199     }
6200     return AK;
6201   }
6202 
6203   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6204   /// an access of kind \p AK to a \p MLK memory location with the access
6205   /// pointer \p Ptr.
6206   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6207                                  MemoryLocationsKind MLK, const Instruction *I,
6208                                  const Value *Ptr, bool &Changed,
6209                                  AccessKind AK = READ_WRITE) {
6210 
6211     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6212     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6213     if (!Accesses)
6214       Accesses = new (Allocator) AccessSet();
6215     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6216     State.removeAssumedBits(MLK);
6217   }
6218 
6219   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6220   /// arguments, and update the state and access map accordingly.
6221   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6222                           AAMemoryLocation::StateType &State, bool &Changed);
6223 
6224   /// Used to allocate access sets.
6225   BumpPtrAllocator &Allocator;
6226 
6227   /// The set of IR attributes AAMemoryLocation deals with.
6228   static const Attribute::AttrKind AttrKinds[4];
6229 };
6230 
6231 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6232     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6233     Attribute::InaccessibleMemOrArgMemOnly};
6234 
6235 void AAMemoryLocationImpl::categorizePtrValue(
6236     Attributor &A, const Instruction &I, const Value &Ptr,
6237     AAMemoryLocation::StateType &State, bool &Changed) {
6238   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6239                     << Ptr << " ["
6240                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6241 
6242   auto StripGEPCB = [](Value *V) -> Value * {
6243     auto *GEP = dyn_cast<GEPOperator>(V);
6244     while (GEP) {
6245       V = GEP->getPointerOperand();
6246       GEP = dyn_cast<GEPOperator>(V);
6247     }
6248     return V;
6249   };
6250 
6251   auto VisitValueCB = [&](Value &V, const Instruction *,
6252                           AAMemoryLocation::StateType &T,
6253                           bool Stripped) -> bool {
6254     MemoryLocationsKind MLK = NO_LOCATIONS;
6255     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6256     if (isa<UndefValue>(V))
6257       return true;
6258     if (auto *Arg = dyn_cast<Argument>(&V)) {
6259       if (Arg->hasByValAttr())
6260         MLK = NO_LOCAL_MEM;
6261       else
6262         MLK = NO_ARGUMENT_MEM;
6263     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6264       if (GV->hasLocalLinkage())
6265         MLK = NO_GLOBAL_INTERNAL_MEM;
6266       else
6267         MLK = NO_GLOBAL_EXTERNAL_MEM;
6268     } else if (isa<ConstantPointerNull>(V) &&
6269                !NullPointerIsDefined(getAssociatedFunction(),
6270                                      V.getType()->getPointerAddressSpace())) {
6271       return true;
6272     } else if (isa<AllocaInst>(V)) {
6273       MLK = NO_LOCAL_MEM;
6274     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6275       const auto &NoAliasAA =
6276           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6277       if (NoAliasAA.isAssumedNoAlias())
6278         MLK = NO_MALLOCED_MEM;
6279       else
6280         MLK = NO_UNKOWN_MEM;
6281     } else {
6282       MLK = NO_UNKOWN_MEM;
6283     }
6284 
6285     assert(MLK != NO_LOCATIONS && "No location specified!");
6286     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6287                               getAccessKindFromInst(&I));
6288     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6289                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6290                       << "\n");
6291     return true;
6292   };
6293 
6294   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6295           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6296           /* UseValueSimplify */ true,
6297           /* MaxValues */ 32, StripGEPCB)) {
6298     LLVM_DEBUG(
6299         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6300     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6301                               getAccessKindFromInst(&I));
6302   } else {
6303     LLVM_DEBUG(
6304         dbgs()
6305         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6306         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6307   }
6308 }
6309 
6310 AAMemoryLocation::MemoryLocationsKind
6311 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6312                                                   bool &Changed) {
6313   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6314                     << I << "\n");
6315 
6316   AAMemoryLocation::StateType AccessedLocs;
6317   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6318 
6319   if (auto *CB = dyn_cast<CallBase>(&I)) {
6320 
6321     // First check if we assume any memory is access is visible.
6322     const auto &CBMemLocationAA =
6323         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6324     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6325                       << " [" << CBMemLocationAA << "]\n");
6326 
6327     if (CBMemLocationAA.isAssumedReadNone())
6328       return NO_LOCATIONS;
6329 
6330     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6331       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6332                                 Changed, getAccessKindFromInst(&I));
6333       return AccessedLocs.getAssumed();
6334     }
6335 
6336     uint32_t CBAssumedNotAccessedLocs =
6337         CBMemLocationAA.getAssumedNotAccessedLocation();
6338 
6339     // Set the argmemonly and global bit as we handle them separately below.
6340     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6341         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6342 
6343     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6344       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6345         continue;
6346       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6347                                 getAccessKindFromInst(&I));
6348     }
6349 
6350     // Now handle global memory if it might be accessed. This is slightly tricky
6351     // as NO_GLOBAL_MEM has multiple bits set.
6352     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6353     if (HasGlobalAccesses) {
6354       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6355                             AccessKind Kind, MemoryLocationsKind MLK) {
6356         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6357                                   getAccessKindFromInst(&I));
6358         return true;
6359       };
6360       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6361               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6362         return AccessedLocs.getWorstState();
6363     }
6364 
6365     LLVM_DEBUG(
6366         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6367                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6368 
6369     // Now handle argument memory if it might be accessed.
6370     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6371     if (HasArgAccesses) {
6372       for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E;
6373            ++ArgNo) {
6374 
6375         // Skip non-pointer arguments.
6376         const Value *ArgOp = CB->getArgOperand(ArgNo);
6377         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6378           continue;
6379 
6380         // Skip readnone arguments.
6381         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
6382         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6383             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6384 
6385         if (ArgOpMemLocationAA.isAssumedReadNone())
6386           continue;
6387 
6388         // Categorize potentially accessed pointer arguments as if there was an
6389         // access instruction with them as pointer.
6390         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6391       }
6392     }
6393 
6394     LLVM_DEBUG(
6395         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6396                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6397 
6398     return AccessedLocs.getAssumed();
6399   }
6400 
6401   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6402     LLVM_DEBUG(
6403         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6404                << I << " [" << *Ptr << "]\n");
6405     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6406     return AccessedLocs.getAssumed();
6407   }
6408 
6409   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6410                     << I << "\n");
6411   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6412                             getAccessKindFromInst(&I));
6413   return AccessedLocs.getAssumed();
6414 }
6415 
6416 /// An AA to represent the memory behavior function attributes.
6417 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6418   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6419       : AAMemoryLocationImpl(IRP, A) {}
6420 
6421   /// See AbstractAttribute::updateImpl(Attributor &A).
6422   virtual ChangeStatus updateImpl(Attributor &A) override {
6423 
6424     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6425         *this, getIRPosition(), /* TrackDependence */ false);
6426     if (MemBehaviorAA.isAssumedReadNone()) {
6427       if (MemBehaviorAA.isKnownReadNone())
6428         return indicateOptimisticFixpoint();
6429       assert(isAssumedReadNone() &&
6430              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6431       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6432       return ChangeStatus::UNCHANGED;
6433     }
6434 
6435     // The current assumed state used to determine a change.
6436     auto AssumedState = getAssumed();
6437     bool Changed = false;
6438 
6439     auto CheckRWInst = [&](Instruction &I) {
6440       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6441       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6442                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6443       removeAssumedBits(inverseLocation(MLK, false, false));
6444       return true;
6445     };
6446 
6447     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6448       return indicatePessimisticFixpoint();
6449 
6450     Changed |= AssumedState != getAssumed();
6451     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6452   }
6453 
6454   /// See AbstractAttribute::trackStatistics()
6455   void trackStatistics() const override {
6456     if (isAssumedReadNone())
6457       STATS_DECLTRACK_FN_ATTR(readnone)
6458     else if (isAssumedArgMemOnly())
6459       STATS_DECLTRACK_FN_ATTR(argmemonly)
6460     else if (isAssumedInaccessibleMemOnly())
6461       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6462     else if (isAssumedInaccessibleOrArgMemOnly())
6463       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6464   }
6465 };
6466 
6467 /// AAMemoryLocation attribute for call sites.
6468 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6469   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6470       : AAMemoryLocationImpl(IRP, A) {}
6471 
6472   /// See AbstractAttribute::initialize(...).
6473   void initialize(Attributor &A) override {
6474     AAMemoryLocationImpl::initialize(A);
6475     Function *F = getAssociatedFunction();
6476     if (!F || !A.isFunctionIPOAmendable(*F)) {
6477       indicatePessimisticFixpoint();
6478       return;
6479     }
6480   }
6481 
6482   /// See AbstractAttribute::updateImpl(...).
6483   ChangeStatus updateImpl(Attributor &A) override {
6484     // TODO: Once we have call site specific value information we can provide
6485     //       call site specific liveness liveness information and then it makes
6486     //       sense to specialize attributes for call sites arguments instead of
6487     //       redirecting requests to the callee argument.
6488     Function *F = getAssociatedFunction();
6489     const IRPosition &FnPos = IRPosition::function(*F);
6490     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6491     bool Changed = false;
6492     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6493                           AccessKind Kind, MemoryLocationsKind MLK) {
6494       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6495                                 getAccessKindFromInst(I));
6496       return true;
6497     };
6498     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6499       return indicatePessimisticFixpoint();
6500     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6501   }
6502 
6503   /// See AbstractAttribute::trackStatistics()
6504   void trackStatistics() const override {
6505     if (isAssumedReadNone())
6506       STATS_DECLTRACK_CS_ATTR(readnone)
6507   }
6508 };
6509 
6510 /// ------------------ Value Constant Range Attribute -------------------------
6511 
6512 struct AAValueConstantRangeImpl : AAValueConstantRange {
6513   using StateType = IntegerRangeState;
6514   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6515       : AAValueConstantRange(IRP, A) {}
6516 
6517   /// See AbstractAttribute::getAsStr().
6518   const std::string getAsStr() const override {
6519     std::string Str;
6520     llvm::raw_string_ostream OS(Str);
6521     OS << "range(" << getBitWidth() << ")<";
6522     getKnown().print(OS);
6523     OS << " / ";
6524     getAssumed().print(OS);
6525     OS << ">";
6526     return OS.str();
6527   }
6528 
6529   /// Helper function to get a SCEV expr for the associated value at program
6530   /// point \p I.
6531   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6532     if (!getAnchorScope())
6533       return nullptr;
6534 
6535     ScalarEvolution *SE =
6536         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6537             *getAnchorScope());
6538 
6539     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6540         *getAnchorScope());
6541 
6542     if (!SE || !LI)
6543       return nullptr;
6544 
6545     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6546     if (!I)
6547       return S;
6548 
6549     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6550   }
6551 
6552   /// Helper function to get a range from SCEV for the associated value at
6553   /// program point \p I.
6554   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6555                                          const Instruction *I = nullptr) const {
6556     if (!getAnchorScope())
6557       return getWorstState(getBitWidth());
6558 
6559     ScalarEvolution *SE =
6560         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6561             *getAnchorScope());
6562 
6563     const SCEV *S = getSCEV(A, I);
6564     if (!SE || !S)
6565       return getWorstState(getBitWidth());
6566 
6567     return SE->getUnsignedRange(S);
6568   }
6569 
6570   /// Helper function to get a range from LVI for the associated value at
6571   /// program point \p I.
6572   ConstantRange
6573   getConstantRangeFromLVI(Attributor &A,
6574                           const Instruction *CtxI = nullptr) const {
6575     if (!getAnchorScope())
6576       return getWorstState(getBitWidth());
6577 
6578     LazyValueInfo *LVI =
6579         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6580             *getAnchorScope());
6581 
6582     if (!LVI || !CtxI)
6583       return getWorstState(getBitWidth());
6584     return LVI->getConstantRange(&getAssociatedValue(),
6585                                  const_cast<BasicBlock *>(CtxI->getParent()),
6586                                  const_cast<Instruction *>(CtxI));
6587   }
6588 
6589   /// See AAValueConstantRange::getKnownConstantRange(..).
6590   ConstantRange
6591   getKnownConstantRange(Attributor &A,
6592                         const Instruction *CtxI = nullptr) const override {
6593     if (!CtxI || CtxI == getCtxI())
6594       return getKnown();
6595 
6596     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6597     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6598     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6599   }
6600 
6601   /// See AAValueConstantRange::getAssumedConstantRange(..).
6602   ConstantRange
6603   getAssumedConstantRange(Attributor &A,
6604                           const Instruction *CtxI = nullptr) const override {
6605     // TODO: Make SCEV use Attributor assumption.
6606     //       We may be able to bound a variable range via assumptions in
6607     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6608     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6609 
6610     if (!CtxI || CtxI == getCtxI())
6611       return getAssumed();
6612 
6613     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6614     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6615     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6616   }
6617 
6618   /// See AbstractAttribute::initialize(..).
6619   void initialize(Attributor &A) override {
6620     // Intersect a range given by SCEV.
6621     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6622 
6623     // Intersect a range given by LVI.
6624     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6625   }
6626 
6627   /// Helper function to create MDNode for range metadata.
6628   static MDNode *
6629   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6630                             const ConstantRange &AssumedConstantRange) {
6631     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6632                                   Ty, AssumedConstantRange.getLower())),
6633                               ConstantAsMetadata::get(ConstantInt::get(
6634                                   Ty, AssumedConstantRange.getUpper()))};
6635     return MDNode::get(Ctx, LowAndHigh);
6636   }
6637 
6638   /// Return true if \p Assumed is included in \p KnownRanges.
6639   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6640 
6641     if (Assumed.isFullSet())
6642       return false;
6643 
6644     if (!KnownRanges)
6645       return true;
6646 
6647     // If multiple ranges are annotated in IR, we give up to annotate assumed
6648     // range for now.
6649 
6650     // TODO:  If there exists a known range which containts assumed range, we
6651     // can say assumed range is better.
6652     if (KnownRanges->getNumOperands() > 2)
6653       return false;
6654 
6655     ConstantInt *Lower =
6656         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6657     ConstantInt *Upper =
6658         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6659 
6660     ConstantRange Known(Lower->getValue(), Upper->getValue());
6661     return Known.contains(Assumed) && Known != Assumed;
6662   }
6663 
6664   /// Helper function to set range metadata.
6665   static bool
6666   setRangeMetadataIfisBetterRange(Instruction *I,
6667                                   const ConstantRange &AssumedConstantRange) {
6668     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6669     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6670       if (!AssumedConstantRange.isEmptySet()) {
6671         I->setMetadata(LLVMContext::MD_range,
6672                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6673                                                  AssumedConstantRange));
6674         return true;
6675       }
6676     }
6677     return false;
6678   }
6679 
6680   /// See AbstractAttribute::manifest()
6681   ChangeStatus manifest(Attributor &A) override {
6682     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6683     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6684     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6685 
6686     auto &V = getAssociatedValue();
6687     if (!AssumedConstantRange.isEmptySet() &&
6688         !AssumedConstantRange.isSingleElement()) {
6689       if (Instruction *I = dyn_cast<Instruction>(&V))
6690         if (isa<CallInst>(I) || isa<LoadInst>(I))
6691           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6692             Changed = ChangeStatus::CHANGED;
6693     }
6694 
6695     return Changed;
6696   }
6697 };
6698 
6699 struct AAValueConstantRangeArgument final
6700     : AAArgumentFromCallSiteArguments<
6701           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6702   using Base = AAArgumentFromCallSiteArguments<
6703       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
6704   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
6705       : Base(IRP, A) {}
6706 
6707   /// See AbstractAttribute::initialize(..).
6708   void initialize(Attributor &A) override {
6709     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
6710       indicatePessimisticFixpoint();
6711     } else {
6712       Base::initialize(A);
6713     }
6714   }
6715 
6716   /// See AbstractAttribute::trackStatistics()
6717   void trackStatistics() const override {
6718     STATS_DECLTRACK_ARG_ATTR(value_range)
6719   }
6720 };
6721 
6722 struct AAValueConstantRangeReturned
6723     : AAReturnedFromReturnedValues<AAValueConstantRange,
6724                                    AAValueConstantRangeImpl> {
6725   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6726                                             AAValueConstantRangeImpl>;
6727   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
6728       : Base(IRP, A) {}
6729 
6730   /// See AbstractAttribute::initialize(...).
6731   void initialize(Attributor &A) override {}
6732 
6733   /// See AbstractAttribute::trackStatistics()
6734   void trackStatistics() const override {
6735     STATS_DECLTRACK_FNRET_ATTR(value_range)
6736   }
6737 };
6738 
6739 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6740   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
6741       : AAValueConstantRangeImpl(IRP, A) {}
6742 
6743   /// See AbstractAttribute::initialize(...).
6744   void initialize(Attributor &A) override {
6745     AAValueConstantRangeImpl::initialize(A);
6746     Value &V = getAssociatedValue();
6747 
6748     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6749       unionAssumed(ConstantRange(C->getValue()));
6750       indicateOptimisticFixpoint();
6751       return;
6752     }
6753 
6754     if (isa<UndefValue>(&V)) {
6755       // Collapse the undef state to 0.
6756       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6757       indicateOptimisticFixpoint();
6758       return;
6759     }
6760 
6761     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6762       return;
6763     // If it is a load instruction with range metadata, use it.
6764     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6765       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6766         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6767         return;
6768       }
6769 
6770     // We can work with PHI and select instruction as we traverse their operands
6771     // during update.
6772     if (isa<SelectInst>(V) || isa<PHINode>(V))
6773       return;
6774 
6775     // Otherwise we give up.
6776     indicatePessimisticFixpoint();
6777 
6778     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6779                       << getAssociatedValue() << "\n");
6780   }
6781 
6782   bool calculateBinaryOperator(
6783       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6784       const Instruction *CtxI,
6785       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6786     Value *LHS = BinOp->getOperand(0);
6787     Value *RHS = BinOp->getOperand(1);
6788     // TODO: Allow non integers as well.
6789     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6790       return false;
6791 
6792     auto &LHSAA =
6793         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6794     QuerriedAAs.push_back(&LHSAA);
6795     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6796 
6797     auto &RHSAA =
6798         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6799     QuerriedAAs.push_back(&RHSAA);
6800     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6801 
6802     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
6803 
6804     T.unionAssumed(AssumedRange);
6805 
6806     // TODO: Track a known state too.
6807 
6808     return T.isValidState();
6809   }
6810 
6811   bool calculateCastInst(
6812       Attributor &A, CastInst *CastI, IntegerRangeState &T,
6813       const Instruction *CtxI,
6814       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6815     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
6816     // TODO: Allow non integers as well.
6817     Value &OpV = *CastI->getOperand(0);
6818     if (!OpV.getType()->isIntegerTy())
6819       return false;
6820 
6821     auto &OpAA =
6822         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
6823     QuerriedAAs.push_back(&OpAA);
6824     T.unionAssumed(
6825         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
6826     return T.isValidState();
6827   }
6828 
6829   bool
6830   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
6831                    const Instruction *CtxI,
6832                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6833     Value *LHS = CmpI->getOperand(0);
6834     Value *RHS = CmpI->getOperand(1);
6835     // TODO: Allow non integers as well.
6836     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6837       return false;
6838 
6839     auto &LHSAA =
6840         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6841     QuerriedAAs.push_back(&LHSAA);
6842     auto &RHSAA =
6843         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6844     QuerriedAAs.push_back(&RHSAA);
6845 
6846     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6847     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6848 
6849     // If one of them is empty set, we can't decide.
6850     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
6851       return true;
6852 
6853     bool MustTrue = false, MustFalse = false;
6854 
6855     auto AllowedRegion =
6856         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
6857 
6858     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
6859         CmpI->getPredicate(), RHSAARange);
6860 
6861     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
6862       MustFalse = true;
6863 
6864     if (SatisfyingRegion.contains(LHSAARange))
6865       MustTrue = true;
6866 
6867     assert((!MustTrue || !MustFalse) &&
6868            "Either MustTrue or MustFalse should be false!");
6869 
6870     if (MustTrue)
6871       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
6872     else if (MustFalse)
6873       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
6874     else
6875       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
6876 
6877     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
6878                       << " " << RHSAA << "\n");
6879 
6880     // TODO: Track a known state too.
6881     return T.isValidState();
6882   }
6883 
6884   /// See AbstractAttribute::updateImpl(...).
6885   ChangeStatus updateImpl(Attributor &A) override {
6886     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
6887                             IntegerRangeState &T, bool Stripped) -> bool {
6888       Instruction *I = dyn_cast<Instruction>(&V);
6889       if (!I || isa<CallBase>(I)) {
6890 
6891         // If the value is not instruction, we query AA to Attributor.
6892         const auto &AA =
6893             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
6894 
6895         // Clamp operator is not used to utilize a program point CtxI.
6896         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
6897 
6898         return T.isValidState();
6899       }
6900 
6901       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
6902       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
6903         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
6904           return false;
6905       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
6906         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
6907           return false;
6908       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
6909         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
6910           return false;
6911       } else {
6912         // Give up with other instructions.
6913         // TODO: Add other instructions
6914 
6915         T.indicatePessimisticFixpoint();
6916         return false;
6917       }
6918 
6919       // Catch circular reasoning in a pessimistic way for now.
6920       // TODO: Check how the range evolves and if we stripped anything, see also
6921       //       AADereferenceable or AAAlign for similar situations.
6922       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
6923         if (QueriedAA != this)
6924           continue;
6925         // If we are in a stady state we do not need to worry.
6926         if (T.getAssumed() == getState().getAssumed())
6927           continue;
6928         T.indicatePessimisticFixpoint();
6929       }
6930 
6931       return T.isValidState();
6932     };
6933 
6934     IntegerRangeState T(getBitWidth());
6935 
6936     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
6937             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
6938             /* UseValueSimplify */ false))
6939       return indicatePessimisticFixpoint();
6940 
6941     return clampStateAndIndicateChange(getState(), T);
6942   }
6943 
6944   /// See AbstractAttribute::trackStatistics()
6945   void trackStatistics() const override {
6946     STATS_DECLTRACK_FLOATING_ATTR(value_range)
6947   }
6948 };
6949 
6950 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
6951   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
6952       : AAValueConstantRangeImpl(IRP, A) {}
6953 
6954   /// See AbstractAttribute::initialize(...).
6955   ChangeStatus updateImpl(Attributor &A) override {
6956     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
6957                      "not be called");
6958   }
6959 
6960   /// See AbstractAttribute::trackStatistics()
6961   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
6962 };
6963 
6964 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
6965   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
6966       : AAValueConstantRangeFunction(IRP, A) {}
6967 
6968   /// See AbstractAttribute::trackStatistics()
6969   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
6970 };
6971 
6972 struct AAValueConstantRangeCallSiteReturned
6973     : AACallSiteReturnedFromReturned<AAValueConstantRange,
6974                                      AAValueConstantRangeImpl> {
6975   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
6976       : AACallSiteReturnedFromReturned<AAValueConstantRange,
6977                                        AAValueConstantRangeImpl>(IRP, A) {}
6978 
6979   /// See AbstractAttribute::initialize(...).
6980   void initialize(Attributor &A) override {
6981     // If it is a load instruction with range metadata, use the metadata.
6982     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
6983       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
6984         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6985 
6986     AAValueConstantRangeImpl::initialize(A);
6987   }
6988 
6989   /// See AbstractAttribute::trackStatistics()
6990   void trackStatistics() const override {
6991     STATS_DECLTRACK_CSRET_ATTR(value_range)
6992   }
6993 };
6994 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
6995   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
6996       : AAValueConstantRangeFloating(IRP, A) {}
6997 
6998   /// See AbstractAttribute::trackStatistics()
6999   void trackStatistics() const override {
7000     STATS_DECLTRACK_CSARG_ATTR(value_range)
7001   }
7002 };
7003 } // namespace
7004 
7005 const char AAReturnedValues::ID = 0;
7006 const char AANoUnwind::ID = 0;
7007 const char AANoSync::ID = 0;
7008 const char AANoFree::ID = 0;
7009 const char AANonNull::ID = 0;
7010 const char AANoRecurse::ID = 0;
7011 const char AAWillReturn::ID = 0;
7012 const char AAUndefinedBehavior::ID = 0;
7013 const char AANoAlias::ID = 0;
7014 const char AAReachability::ID = 0;
7015 const char AANoReturn::ID = 0;
7016 const char AAIsDead::ID = 0;
7017 const char AADereferenceable::ID = 0;
7018 const char AAAlign::ID = 0;
7019 const char AANoCapture::ID = 0;
7020 const char AAValueSimplify::ID = 0;
7021 const char AAHeapToStack::ID = 0;
7022 const char AAPrivatizablePtr::ID = 0;
7023 const char AAMemoryBehavior::ID = 0;
7024 const char AAMemoryLocation::ID = 0;
7025 const char AAValueConstantRange::ID = 0;
7026 
7027 // Macro magic to create the static generator function for attributes that
7028 // follow the naming scheme.
7029 
7030 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
7031   case IRPosition::PK:                                                         \
7032     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
7033 
7034 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
7035   case IRPosition::PK:                                                         \
7036     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
7037     ++NumAAs;                                                                  \
7038     break;
7039 
7040 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
7041   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7042     CLASS *AA = nullptr;                                                       \
7043     switch (IRP.getPositionKind()) {                                           \
7044       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7045       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7046       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7047       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7048       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7049       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7050       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7051       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7052     }                                                                          \
7053     return *AA;                                                                \
7054   }
7055 
7056 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
7057   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7058     CLASS *AA = nullptr;                                                       \
7059     switch (IRP.getPositionKind()) {                                           \
7060       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7061       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
7062       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7063       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7064       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7065       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7066       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7067       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7068     }                                                                          \
7069     return *AA;                                                                \
7070   }
7071 
7072 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
7073   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7074     CLASS *AA = nullptr;                                                       \
7075     switch (IRP.getPositionKind()) {                                           \
7076       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7077       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7078       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7079       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7080       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7081       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7082       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7083       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7084     }                                                                          \
7085     return *AA;                                                                \
7086   }
7087 
7088 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
7089   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7090     CLASS *AA = nullptr;                                                       \
7091     switch (IRP.getPositionKind()) {                                           \
7092       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7093       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7094       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7095       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7096       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7097       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7098       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7099       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7100     }                                                                          \
7101     return *AA;                                                                \
7102   }
7103 
7104 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
7105   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7106     CLASS *AA = nullptr;                                                       \
7107     switch (IRP.getPositionKind()) {                                           \
7108       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7109       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7110       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7111       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7112       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7113       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7114       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7115       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7116     }                                                                          \
7117     return *AA;                                                                \
7118   }
7119 
7120 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
7121 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
7122 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
7123 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
7124 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
7125 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
7126 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
7127 
7128 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
7129 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7130 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
7131 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7132 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7133 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7134 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7135 
7136 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7137 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7138 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7139 
7140 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7141 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7142 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7143 
7144 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7145 
7146 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7147 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7148 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7149 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7150 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7151 #undef SWITCH_PK_CREATE
7152 #undef SWITCH_PK_INV
7153