1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 
36 #include <cassert>
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "attributor"
41 
42 static cl::opt<bool> ManifestInternal(
43     "attributor-manifest-internal", cl::Hidden,
44     cl::desc("Manifest Attributor internal string attributes."),
45     cl::init(false));
46 
47 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
48                                        cl::Hidden);
49 
50 template <>
51 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
52 
53 static cl::opt<unsigned, true> MaxPotentialValues(
54     "attributor-max-potential-values", cl::Hidden,
55     cl::desc("Maximum number of potential values to be "
56              "tracked for each position."),
57     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
58     cl::init(7));
59 
60 STATISTIC(NumAAs, "Number of abstract attributes created");
61 
62 // Some helper macros to deal with statistics tracking.
63 //
64 // Usage:
65 // For simple IR attribute tracking overload trackStatistics in the abstract
66 // attribute and choose the right STATS_DECLTRACK_********* macro,
67 // e.g.,:
68 //  void trackStatistics() const override {
69 //    STATS_DECLTRACK_ARG_ATTR(returned)
70 //  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
74 //
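// For example, a sketch of the separate usage (illustrative only; the names
// and conditions below are hypothetical):
//  STATS_DECL(nonnull, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, nonnull))
//  if (ManifestedDirectly)
//    STATS_TRACK(nonnull, Arguments)
//  else if (ManifestedViaCallSite)
//    STATS_TRACK(nonnull, Arguments)
//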
75 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
76   ("Number of " #TYPE " marked '" #NAME "'")
77 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
78 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
79 #define STATS_DECL(NAME, TYPE, MSG)                                            \
80   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
81 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
82 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
83   {                                                                            \
84     STATS_DECL(NAME, TYPE, MSG)                                                \
85     STATS_TRACK(NAME, TYPE)                                                    \
86   }
87 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
88   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
89 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
90   STATS_DECLTRACK(NAME, CSArguments,                                           \
91                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
92 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
94 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
95   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
96 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
97   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
98                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
99 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
100   STATS_DECLTRACK(NAME, CSReturn,                                              \
101                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
102 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
103   STATS_DECLTRACK(NAME, Floating,                                              \
104                   ("Number of floating values known to be '" #NAME "'"))
105 
// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
108 namespace llvm {
109 #define PIPE_OPERATOR(CLASS)                                                   \
110   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
111     return OS << static_cast<const AbstractAttribute &>(AA);                   \
112   }
113 
114 PIPE_OPERATOR(AAIsDead)
115 PIPE_OPERATOR(AANoUnwind)
116 PIPE_OPERATOR(AANoSync)
117 PIPE_OPERATOR(AANoRecurse)
118 PIPE_OPERATOR(AAWillReturn)
119 PIPE_OPERATOR(AANoReturn)
120 PIPE_OPERATOR(AAReturnedValues)
121 PIPE_OPERATOR(AANonNull)
122 PIPE_OPERATOR(AANoAlias)
123 PIPE_OPERATOR(AADereferenceable)
124 PIPE_OPERATOR(AAAlign)
125 PIPE_OPERATOR(AANoCapture)
126 PIPE_OPERATOR(AAValueSimplify)
127 PIPE_OPERATOR(AANoFree)
128 PIPE_OPERATOR(AAHeapToStack)
129 PIPE_OPERATOR(AAReachability)
130 PIPE_OPERATOR(AAMemoryBehavior)
131 PIPE_OPERATOR(AAMemoryLocation)
132 PIPE_OPERATOR(AAValueConstantRange)
133 PIPE_OPERATOR(AAPrivatizablePtr)
134 PIPE_OPERATOR(AAUndefinedBehavior)
135 PIPE_OPERATOR(AAPotentialValues)
136 PIPE_OPERATOR(AANoUndef)
137 
138 #undef PIPE_OPERATOR
139 } // namespace llvm
140 
141 namespace {
142 
143 static Optional<ConstantInt *>
144 getAssumedConstantInt(Attributor &A, const Value &V,
145                       const AbstractAttribute &AA,
146                       bool &UsedAssumedInformation) {
147   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
148   if (C.hasValue())
149     return dyn_cast_or_null<ConstantInt>(C.getValue());
150   return llvm::None;
151 }
152 
/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
156 static const Value *getPointerOperand(const Instruction *I,
157                                       bool AllowVolatile) {
158   if (auto *LI = dyn_cast<LoadInst>(I)) {
159     if (!AllowVolatile && LI->isVolatile())
160       return nullptr;
161     return LI->getPointerOperand();
162   }
163 
164   if (auto *SI = dyn_cast<StoreInst>(I)) {
165     if (!AllowVolatile && SI->isVolatile())
166       return nullptr;
167     return SI->getPointerOperand();
168   }
169 
170   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
171     if (!AllowVolatile && CXI->isVolatile())
172       return nullptr;
173     return CXI->getPointerOperand();
174   }
175 
176   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
177     if (!AllowVolatile && RMWI->isVolatile())
178       return nullptr;
179     return RMWI->getPointerOperand();
180   }
181 
182   return nullptr;
183 }
184 
/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, i.e.,
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
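///
/// For example (illustrative sketch): given
///   %struct.Pair = type { i64, i32 }
/// a \p Ptr "%p" of type %struct.Pair*, \p Offset = 8, and \p ResTy = i32*,
/// this emits roughly
///   %p.0.1 = getelementptr %struct.Pair, %struct.Pair* %p, i32 0, i32 1
/// which already has the requested type, so no byte-wise adjustment or final
/// cast is needed.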
193 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
194                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
195   assert(Offset >= 0 && "Negative offset not supported yet!");
196   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
197                     << "-bytes as " << *ResTy << "\n");
198 
199   // The initial type we are trying to traverse to get nice GEPs.
200   Type *Ty = Ptr->getType();
201 
202   SmallVector<Value *, 4> Indices;
203   std::string GEPName = Ptr->getName().str();
204   while (Offset) {
205     uint64_t Idx, Rem;
206 
207     if (auto *STy = dyn_cast<StructType>(Ty)) {
208       const StructLayout *SL = DL.getStructLayout(STy);
209       if (int64_t(SL->getSizeInBytes()) < Offset)
210         break;
211       Idx = SL->getElementContainingOffset(Offset);
212       assert(Idx < STy->getNumElements() && "Offset calculation error!");
213       Rem = Offset - SL->getElementOffset(Idx);
214       Ty = STy->getElementType(Idx);
215     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
216       Ty = PTy->getElementType();
217       if (!Ty->isSized())
218         break;
219       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
220       assert(ElementSize && "Expected type with size!");
221       Idx = Offset / ElementSize;
222       Rem = Offset % ElementSize;
223     } else {
224       // Non-aggregate type, we cast and make byte-wise progress now.
225       break;
226     }
227 
228     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
229                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
230 
231     GEPName += "." + std::to_string(Idx);
232     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
233     Offset = Rem;
234   }
235 
236   // Create a GEP if we collected indices above.
237   if (Indices.size())
238     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
239 
240   // If an offset is left we use byte-wise adjustment.
241   if (Offset) {
242     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
243     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
244                         GEPName + ".b" + Twine(Offset));
245   }
246 
247   // Ensure the result has the requested type.
248   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
249 
250   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
251   return Ptr;
252 }
253 
254 /// Recursively visit all values that might become \p IRP at some point. This
255 /// will be done by looking through cast instructions, selects, phis, and calls
256 /// with the "returned" attribute. Once we cannot look through the value any
257 /// further, the callback \p VisitValueCB is invoked and passed the current
258 /// value, the \p State, and a flag to indicate if we stripped anything.
259 /// Stripped means that we unpacked the value associated with \p IRP at least
260 /// once. Note that the value used for the callback may still be the value
261 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
262 /// we will never visit more values than specified by \p MaxValues.
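///
/// For example (illustrative): starting from
///   %sel = select i1 %c, i32* %a, i32* %b
/// the traversal does not stop at %sel but continues to %a and %b, and
/// \p VisitValueCB is eventually invoked for those leaves instead.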
263 template <typename AAType, typename StateTy>
264 static bool genericValueTraversal(
265     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
266     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
267         VisitValueCB,
268     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
269     function_ref<Value *(Value *)> StripCB = nullptr) {
270 
271   const AAIsDead *LivenessAA = nullptr;
272   if (IRP.getAnchorScope())
273     LivenessAA = &A.getAAFor<AAIsDead>(
274         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
275         DepClassTy::NONE);
276   bool AnyDead = false;
277 
278   using Item = std::pair<Value *, const Instruction *>;
279   SmallSet<Item, 16> Visited;
280   SmallVector<Item, 16> Worklist;
281   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
282 
283   int Iteration = 0;
284   do {
285     Item I = Worklist.pop_back_val();
286     Value *V = I.first;
287     CtxI = I.second;
288     if (StripCB)
289       V = StripCB(V);
290 
291     // Check if we should process the current value. To prevent endless
292     // recursion keep a record of the values we followed!
293     if (!Visited.insert(I).second)
294       continue;
295 
296     // Make sure we limit the compile time for complex expressions.
297     if (Iteration++ >= MaxValues)
298       return false;
299 
300     // Explicitly look through calls with a "returned" attribute if we do
301     // not have a pointer as stripPointerCasts only works on them.
302     Value *NewV = nullptr;
303     if (V->getType()->isPointerTy()) {
304       NewV = V->stripPointerCasts();
305     } else {
306       auto *CB = dyn_cast<CallBase>(V);
307       if (CB && CB->getCalledFunction()) {
308         for (Argument &Arg : CB->getCalledFunction()->args())
309           if (Arg.hasReturnedAttr()) {
310             NewV = CB->getArgOperand(Arg.getArgNo());
311             break;
312           }
313       }
314     }
315     if (NewV && NewV != V) {
316       Worklist.push_back({NewV, CtxI});
317       continue;
318     }
319 
320     // Look through select instructions, visit both potential values.
321     if (auto *SI = dyn_cast<SelectInst>(V)) {
322       Worklist.push_back({SI->getTrueValue(), CtxI});
323       Worklist.push_back({SI->getFalseValue(), CtxI});
324       continue;
325     }
326 
327     // Look through phi nodes, visit all live operands.
328     if (auto *PHI = dyn_cast<PHINode>(V)) {
329       assert(LivenessAA &&
330              "Expected liveness in the presence of instructions!");
331       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
332         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
333         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
334                             LivenessAA,
335                             /* CheckBBLivenessOnly */ true)) {
336           AnyDead = true;
337           continue;
338         }
339         Worklist.push_back(
340             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
341       }
342       continue;
343     }
344 
345     if (UseValueSimplify && !isa<Constant>(V)) {
346       bool UsedAssumedInformation = false;
347       Optional<Constant *> C =
348           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
349       if (!C.hasValue())
350         continue;
351       if (Value *NewV = C.getValue()) {
352         Worklist.push_back({NewV, CtxI});
353         continue;
354       }
355     }
356 
357     // Once a leaf is reached we inform the user through the callback.
358     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
359       return false;
360   } while (!Worklist.empty());
361 
  // If we actually used liveness information, we have to record a dependence.
363   if (AnyDead)
364     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
365 
366   // All values have been visited.
367   return true;
368 }
369 
370 const Value *stripAndAccumulateMinimalOffsets(
371     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
372     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
373     bool UseAssumed = false) {
374 
375   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
376     const IRPosition &Pos = IRPosition::value(V);
377     // Only track dependence if we are going to use the assumed info.
378     const AAValueConstantRange &ValueConstantRangeAA =
379         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
380                                          UseAssumed ? DepClassTy::OPTIONAL
381                                                     : DepClassTy::NONE);
382     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
383                                      : ValueConstantRangeAA.getKnown();
384     // We can only use the lower part of the range because the upper part can
385     // be higher than what the value can really be.
386     ROffset = Range.getSignedMin();
387     return true;
388   };
389 
390   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
391                                                 AttributorAnalysis);
392 }
393 
394 static const Value *getMinimalBaseOfAccsesPointerOperand(
395     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
396     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
397   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
398   if (!Ptr)
399     return nullptr;
400   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
401   const Value *Base = stripAndAccumulateMinimalOffsets(
402       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
403 
404   BytesOffset = OffsetAPInt.getSExtValue();
405   return Base;
406 }
407 
408 static const Value *
409 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
410                                      const DataLayout &DL,
411                                      bool AllowNonInbounds = false) {
412   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
413   if (!Ptr)
414     return nullptr;
415 
416   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
417                                           AllowNonInbounds);
418 }
419 
/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, an
/// update is required to be run again).
423 template <typename StateType>
424 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
425   auto Assumed = S.getAssumed();
426   S ^= R;
427   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
428                                    : ChangeStatus::CHANGED;
429 }
430 
431 /// Clamp the information known for all returned values of a function
432 /// (identified by \p QueryingAA) into \p S.
433 template <typename AAType, typename StateType = typename AAType::StateType>
434 static void clampReturnedValueStates(
435     Attributor &A, const AAType &QueryingAA, StateType &S,
436     const IRPosition::CallBaseContext *CBContext = nullptr) {
437   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
438                     << QueryingAA << " into " << S << "\n");
439 
440   assert((QueryingAA.getIRPosition().getPositionKind() ==
441               IRPosition::IRP_RETURNED ||
442           QueryingAA.getIRPosition().getPositionKind() ==
443               IRPosition::IRP_CALL_SITE_RETURNED) &&
444          "Can only clamp returned value states for a function returned or call "
445          "site returned position!");
446 
  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all those we find.
449   Optional<StateType> T;
450 
451   // Callback for each possibly returned value.
452   auto CheckReturnValue = [&](Value &RV) -> bool {
453     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
454     const AAType &AA =
455         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
456     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
457                       << " @ " << RVPos << "\n");
458     const StateType &AAS = AA.getState();
459     if (T.hasValue())
460       *T &= AAS;
461     else
462       T = AAS;
463     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
464                       << "\n");
465     return T->isValidState();
466   };
467 
468   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
469     S.indicatePessimisticFixpoint();
470   else if (T.hasValue())
471     S ^= *T;
472 }
473 
474 /// Helper class for generic deduction: return value -> returned position.
475 template <typename AAType, typename BaseType,
476           typename StateType = typename BaseType::StateType,
477           bool PropagateCallBaseContext = false>
478 struct AAReturnedFromReturnedValues : public BaseType {
479   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
480       : BaseType(IRP, A) {}
481 
482   /// See AbstractAttribute::updateImpl(...).
483   ChangeStatus updateImpl(Attributor &A) override {
484     StateType S(StateType::getBestState(this->getState()));
485     clampReturnedValueStates<AAType, StateType>(
486         A, *this, S,
487         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
490     return clampStateAndIndicateChange<StateType>(this->getState(), S);
491   }
492 };
493 
494 /// Clamp the information known at all call sites for a given argument
495 /// (identified by \p QueryingAA) into \p S.
496 template <typename AAType, typename StateType = typename AAType::StateType>
497 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
498                                         StateType &S) {
499   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
500                     << QueryingAA << " into " << S << "\n");
501 
502   assert(QueryingAA.getIRPosition().getPositionKind() ==
503              IRPosition::IRP_ARGUMENT &&
504          "Can only clamp call site argument states for an argument position!");
505 
  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all those we find.
508   Optional<StateType> T;
509 
510   // The argument number which is also the call site argument number.
511   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
512 
513   auto CallSiteCheck = [&](AbstractCallSite ACS) {
514     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
517     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
518       return false;
519 
520     const AAType &AA =
521         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
522     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
523                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
524     const StateType &AAS = AA.getState();
525     if (T.hasValue())
526       *T &= AAS;
527     else
528       T = AAS;
529     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
530                       << "\n");
531     return T->isValidState();
532   };
533 
534   bool AllCallSitesKnown;
535   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
536                               AllCallSitesKnown))
537     S.indicatePessimisticFixpoint();
538   else if (T.hasValue())
539     S ^= *T;
540 }
541 
542 /// This function is the bridge between argument position and the call base
543 /// context.
544 template <typename AAType, typename BaseType,
545           typename StateType = typename AAType::StateType>
546 bool getArgumentStateFromCallBaseContext(Attributor &A,
547                                          BaseType &QueryingAttribute,
548                                          IRPosition &Pos, StateType &State) {
549   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
550          "Expected an 'argument' position !");
551   const CallBase *CBContext = Pos.getCallBaseContext();
552   if (!CBContext)
553     return false;
554 
555   int ArgNo = Pos.getCallSiteArgNo();
556   assert(ArgNo >= 0 && "Invalid Arg No!");
557 
558   const auto &AA = A.getAAFor<AAType>(
559       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
560       DepClassTy::REQUIRED);
561   const StateType &CBArgumentState =
562       static_cast<const StateType &>(AA.getState());
563 
  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument. "
                    << "Position: " << Pos
                    << " CB Arg state: " << CBArgumentState << "\n");
567 
568   // NOTE: If we want to do call site grouping it should happen here.
569   State ^= CBArgumentState;
570   return true;
571 }
572 
573 /// Helper class for generic deduction: call site argument -> argument position.
574 template <typename AAType, typename BaseType,
575           typename StateType = typename AAType::StateType,
576           bool BridgeCallBaseContext = false>
577 struct AAArgumentFromCallSiteArguments : public BaseType {
578   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
579       : BaseType(IRP, A) {}
580 
581   /// See AbstractAttribute::updateImpl(...).
582   ChangeStatus updateImpl(Attributor &A) override {
583     StateType S = StateType::getBestState(this->getState());
584 
585     if (BridgeCallBaseContext) {
586       bool Success =
587           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
588               A, *this, this->getIRPosition(), S);
589       if (Success)
590         return clampStateAndIndicateChange<StateType>(this->getState(), S);
591     }
592     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
593 
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
596     return clampStateAndIndicateChange<StateType>(this->getState(), S);
597   }
598 };
599 
600 /// Helper class for generic replication: function returned -> cs returned.
601 template <typename AAType, typename BaseType,
602           typename StateType = typename BaseType::StateType,
603           bool IntroduceCallBaseContext = false>
604 struct AACallSiteReturnedFromReturned : public BaseType {
605   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
606       : BaseType(IRP, A) {}
607 
608   /// See AbstractAttribute::updateImpl(...).
609   ChangeStatus updateImpl(Attributor &A) override {
610     assert(this->getIRPosition().getPositionKind() ==
611                IRPosition::IRP_CALL_SITE_RETURNED &&
612            "Can only wrap function returned positions for call site returned "
613            "positions!");
614     auto &S = this->getState();
615 
616     const Function *AssociatedFunction =
617         this->getIRPosition().getAssociatedFunction();
618     if (!AssociatedFunction)
619       return S.indicatePessimisticFixpoint();
620 
621     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
622     if (IntroduceCallBaseContext)
623       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
624                         << CBContext << "\n");
625 
626     IRPosition FnPos = IRPosition::returned(
627         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
628     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
629     return clampStateAndIndicateChange(S, AA.getState());
630   }
631 };
632 
633 /// Helper function to accumulate uses.
634 template <class AAType, typename StateType = typename AAType::StateType>
635 static void followUsesInContext(AAType &AA, Attributor &A,
636                                 MustBeExecutedContextExplorer &Explorer,
637                                 const Instruction *CtxI,
638                                 SetVector<const Use *> &Uses,
639                                 StateType &State) {
640   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
641   for (unsigned u = 0; u < Uses.size(); ++u) {
642     const Use *U = Uses[u];
643     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
644       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
645       if (Found && AA.followUseInMBEC(A, U, UserI, State))
646         for (const Use &Us : UserI->uses())
647           Uses.insert(&Us);
648     }
649   }
650 }
651 
/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of the \p U.
/// Returns true if the value should be tracked transitively.
///
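/// An implementation might look roughly like this (illustrative sketch only,
/// not taken from an existing AA in this file):
///
///   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                        StateType &State) {
///     if (const auto *LI = dyn_cast<LoadInst>(I))
///       if (LI->getPointerOperand() == U->get()) {
///         // ... update State with the fact implied by the dereference ...
///         return true; // Keep following the uses of the load.
///       }
///     return false;
///   }
///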
661 template <class AAType, typename StateType = typename AAType::StateType>
662 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
663                              Instruction &CtxI) {
664 
665   // Container for (transitive) uses of the associated value.
666   SetVector<const Use *> Uses;
667   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
668     Uses.insert(&U);
669 
670   MustBeExecutedContextExplorer &Explorer =
671       A.getInfoCache().getMustBeExecutedContextExplorer();
672 
673   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
674 
675   if (S.isAtFixpoint())
676     return;
677 
678   SmallVector<const BranchInst *, 4> BrInsts;
679   auto Pred = [&](const Instruction *I) {
680     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
681       if (Br->isConditional())
682         BrInsts.push_back(Br);
683     return true;
684   };
685 
  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged into the overall known state. Let ParentState_i
  // be a state to indicate the known information for the i-th branch
  // instruction in the context. ChildStates are created for its successors
  // respectively.
691   //
692   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
693   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
694   //      ...
695   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
696   //
697   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
698   //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
703   //    if(a)
704   //      if (b) {
705   //        *ptr = 0;
706   //      } else {
707   //        *ptr = 1;
708   //      }
709   //    else {
710   //      if (b) {
711   //        *ptr = 0;
712   //      } else {
713   //        *ptr = 1;
714   //      }
715   //    }
716   // }
717 
718   Explorer.checkForAllContext(&CtxI, Pred);
719   for (const BranchInst *Br : BrInsts) {
720     StateType ParentState;
721 
    // The known state of the parent is a conjunction of the children's known
    // states, so it is initialized with the best state.
724     ParentState.indicateOptimisticFixpoint();
725 
726     for (const BasicBlock *BB : Br->successors()) {
727       StateType ChildState;
728 
729       size_t BeforeSize = Uses.size();
730       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
731 
732       // Erase uses which only appear in the child.
733       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
734         It = Uses.erase(It);
735 
736       ParentState &= ChildState;
737     }
738 
739     // Use only known state.
740     S += ParentState;
741   }
742 }
743 
744 /// -----------------------NoUnwind Function Attribute--------------------------
745 
746 struct AANoUnwindImpl : AANoUnwind {
747   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
748 
749   const std::string getAsStr() const override {
750     return getAssumed() ? "nounwind" : "may-unwind";
751   }
752 
753   /// See AbstractAttribute::updateImpl(...).
754   ChangeStatus updateImpl(Attributor &A) override {
755     auto Opcodes = {
756         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
757         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
758         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
759 
760     auto CheckForNoUnwind = [&](Instruction &I) {
761       if (!I.mayThrow())
762         return true;
763 
764       if (const auto *CB = dyn_cast<CallBase>(&I)) {
765         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
766             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
767         return NoUnwindAA.isAssumedNoUnwind();
768       }
769       return false;
770     };
771 
772     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
773       return indicatePessimisticFixpoint();
774 
775     return ChangeStatus::UNCHANGED;
776   }
777 };
778 
779 struct AANoUnwindFunction final : public AANoUnwindImpl {
780   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
781       : AANoUnwindImpl(IRP, A) {}
782 
783   /// See AbstractAttribute::trackStatistics()
784   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
785 };
786 
/// NoUnwind attribute deduction for a call site.
788 struct AANoUnwindCallSite final : AANoUnwindImpl {
789   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
790       : AANoUnwindImpl(IRP, A) {}
791 
792   /// See AbstractAttribute::initialize(...).
793   void initialize(Attributor &A) override {
794     AANoUnwindImpl::initialize(A);
795     Function *F = getAssociatedFunction();
796     if (!F || F->isDeclaration())
797       indicatePessimisticFixpoint();
798   }
799 
800   /// See AbstractAttribute::updateImpl(...).
801   ChangeStatus updateImpl(Attributor &A) override {
802     // TODO: Once we have call site specific value information we can provide
803     //       call site specific liveness information and then it makes
804     //       sense to specialize attributes for call sites arguments instead of
805     //       redirecting requests to the callee argument.
806     Function *F = getAssociatedFunction();
807     const IRPosition &FnPos = IRPosition::function(*F);
808     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
809     return clampStateAndIndicateChange(getState(), FnAA.getState());
810   }
811 
812   /// See AbstractAttribute::trackStatistics()
813   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
814 };
815 
816 /// --------------------- Function Return Values -------------------------------
817 
818 /// "Attribute" that collects all potential returned values and the return
819 /// instructions that they arise from.
820 ///
821 /// If there is a unique returned value R, the manifest method will:
822 ///   - mark R with the "returned" attribute, if R is an argument.
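///
/// For example (illustrative): in
///   define i32* @f(i32* %p) { ret i32* %p }
/// the argument %p is the unique returned value, so manifest would mark %p
/// with the "returned" attribute.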
823 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
824 
825   /// Mapping of values potentially returned by the associated function to the
826   /// return instructions that might return them.
827   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
828 
829   /// Mapping to remember the number of returned values for a call site such
830   /// that we can avoid updates if nothing changed.
831   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
832 
833   /// Set of unresolved calls returned by the associated function.
834   SmallSetVector<CallBase *, 4> UnresolvedCalls;
835 
836   /// State flags
837   ///
838   ///{
839   bool IsFixed = false;
840   bool IsValidState = true;
841   ///}
842 
843 public:
844   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
845       : AAReturnedValues(IRP, A) {}
846 
847   /// See AbstractAttribute::initialize(...).
848   void initialize(Attributor &A) override {
849     // Reset the state.
850     IsFixed = false;
851     IsValidState = true;
852     ReturnedValues.clear();
853 
854     Function *F = getAssociatedFunction();
855     if (!F || F->isDeclaration()) {
856       indicatePessimisticFixpoint();
857       return;
858     }
859     assert(!F->getReturnType()->isVoidTy() &&
860            "Did not expect a void return type!");
861 
862     // The map from instruction opcodes to those instructions in the function.
863     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
864 
    // Look through all arguments; if one is marked as returned, we are done.
866     for (Argument &Arg : F->args()) {
867       if (Arg.hasReturnedAttr()) {
868         auto &ReturnInstSet = ReturnedValues[&Arg];
869         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
870           for (Instruction *RI : *Insts)
871             ReturnInstSet.insert(cast<ReturnInst>(RI));
872 
873         indicateOptimisticFixpoint();
874         return;
875       }
876     }
877 
878     if (!A.isFunctionIPOAmendable(*F))
879       indicatePessimisticFixpoint();
880   }
881 
882   /// See AbstractAttribute::manifest(...).
883   ChangeStatus manifest(Attributor &A) override;
884 
885   /// See AbstractAttribute::getState(...).
886   AbstractState &getState() override { return *this; }
887 
888   /// See AbstractAttribute::getState(...).
889   const AbstractState &getState() const override { return *this; }
890 
891   /// See AbstractAttribute::updateImpl(Attributor &A).
892   ChangeStatus updateImpl(Attributor &A) override;
893 
894   llvm::iterator_range<iterator> returned_values() override {
895     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
896   }
897 
898   llvm::iterator_range<const_iterator> returned_values() const override {
899     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
900   }
901 
902   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
903     return UnresolvedCalls;
904   }
905 
906   /// Return the number of potential return values, -1 if unknown.
907   size_t getNumReturnValues() const override {
908     return isValidState() ? ReturnedValues.size() : -1;
909   }
910 
911   /// Return an assumed unique return value if a single candidate is found. If
912   /// there cannot be one, return a nullptr. If it is not clear yet, return the
913   /// Optional::NoneType.
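  ///
  /// For example, if the function returns %x on one path and undef on another,
  /// %x is the assumed unique return value.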
914   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
915 
  /// See AAReturnedValues::checkForAllReturnedValuesAndReturnInsts(...).
917   bool checkForAllReturnedValuesAndReturnInsts(
918       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
919       const override;
920 
921   /// Pretty print the attribute similar to the IR representation.
922   const std::string getAsStr() const override;
923 
924   /// See AbstractState::isAtFixpoint().
925   bool isAtFixpoint() const override { return IsFixed; }
926 
927   /// See AbstractState::isValidState().
928   bool isValidState() const override { return IsValidState; }
929 
930   /// See AbstractState::indicateOptimisticFixpoint(...).
931   ChangeStatus indicateOptimisticFixpoint() override {
932     IsFixed = true;
933     return ChangeStatus::UNCHANGED;
934   }
935 
936   ChangeStatus indicatePessimisticFixpoint() override {
937     IsFixed = true;
938     IsValidState = false;
939     return ChangeStatus::CHANGED;
940   }
941 };
942 
943 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
944   ChangeStatus Changed = ChangeStatus::UNCHANGED;
945 
946   // Bookkeeping.
947   assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");
950 
951   // Check if we have an assumed unique return value that we could manifest.
952   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
953 
954   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
955     return Changed;
956 
957   // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with a unique return value");
960 
961   // Callback to replace the uses of CB with the constant C.
962   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
963     if (CB.use_empty())
964       return ChangeStatus::UNCHANGED;
965     if (A.changeValueAfterManifest(CB, C))
966       return ChangeStatus::CHANGED;
967     return ChangeStatus::UNCHANGED;
968   };
969 
970   // If the assumed unique return value is an argument, annotate it.
971   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
972     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
973             getAssociatedFunction()->getReturnType())) {
974       getIRPosition() = IRPosition::argument(*UniqueRVArg);
975       Changed = IRAttribute::manifest(A);
976     }
977   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
978     // We can replace the returned value with the unique returned constant.
979     Value &AnchorValue = getAnchorValue();
980     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
981       for (const Use &U : F->uses())
982         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
983           if (CB->isCallee(&U)) {
984             Constant *RVCCast =
985                 CB->getType() == RVC->getType()
986                     ? RVC
987                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
988             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
989           }
990     } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
993       Constant *RVCCast =
994           AnchorValue.getType() == RVC->getType()
995               ? RVC
996               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
997       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
998     }
999     if (Changed == ChangeStatus::CHANGED)
1000       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
1001                       "Number of function returns replaced by constant return");
1002   }
1003 
1004   return Changed;
1005 }
1006 
1007 const std::string AAReturnedValuesImpl::getAsStr() const {
1008   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1009          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1010          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1011 }
1012 
1013 Optional<Value *>
1014 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1015   // If checkForAllReturnedValues provides a unique value, ignoring potential
1016   // undef values that can also be present, it is assumed to be the actual
1017   // return value and forwarded to the caller of this method. If there are
1018   // multiple, a nullptr is returned indicating there cannot be a unique
1019   // returned value.
1020   Optional<Value *> UniqueRV;
1021 
1022   auto Pred = [&](Value &RV) -> bool {
1023     // If we found a second returned value and neither the current nor the saved
1024     // one is an undef, there is no unique returned value. Undefs are special
1025     // since we can pretend they have any value.
1026     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1027         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1028       UniqueRV = nullptr;
1029       return false;
1030     }
1031 
1032     // Do not overwrite a value with an undef.
1033     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1034       UniqueRV = &RV;
1035 
1036     return true;
1037   };
1038 
1039   if (!A.checkForAllReturnedValues(Pred, *this))
1040     UniqueRV = nullptr;
1041 
1042   return UniqueRV;
1043 }
1044 
1045 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1046     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1047     const {
1048   if (!isValidState())
1049     return false;
1050 
1051   // Check all returned values but ignore call sites as long as we have not
1052   // encountered an overdefined one during an update.
1053   for (auto &It : ReturnedValues) {
1054     Value *RV = It.first;
1055 
1056     CallBase *CB = dyn_cast<CallBase>(RV);
1057     if (CB && !UnresolvedCalls.count(CB))
1058       continue;
1059 
1060     if (!Pred(*RV, It.second))
1061       return false;
1062   }
1063 
1064   return true;
1065 }
1066 
1067 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1068   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1069   bool Changed = false;
1070 
1071   // State used in the value traversals starting in returned values.
1072   struct RVState {
1073     // The map in which we collect return values -> return instrs.
1074     decltype(ReturnedValues) &RetValsMap;
1075     // The flag to indicate a change.
1076     bool &Changed;
1077     // The return instrs we come from.
1078     SmallSetVector<ReturnInst *, 4> RetInsts;
1079   };
1080 
1081   // Callback for a leaf value returned by the associated function.
1082   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1083                          bool) -> bool {
1084     auto Size = RVS.RetValsMap[&Val].size();
1085     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1086     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1087     RVS.Changed |= Inserted;
1088     LLVM_DEBUG({
1089       if (Inserted)
1090         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1091                << " => " << RVS.RetInsts.size() << "\n";
1092     });
1093     return true;
1094   };
1095 
1096   // Helper method to invoke the generic value traversal.
1097   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1098                                 const Instruction *CtxI) {
1099     IRPosition RetValPos = IRPosition::value(RV);
1100     return genericValueTraversal<AAReturnedValues, RVState>(
1101         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1102         /* UseValueSimplify */ false);
1103   };
1104 
  // Callback for all "return instructions" live in the associated function.
1106   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1107     ReturnInst &Ret = cast<ReturnInst>(I);
1108     RVState RVS({ReturnedValues, Changed, {}});
1109     RVS.RetInsts.insert(&Ret);
1110     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1111   };
1112 
  // Start by discovering returned values from all live return instructions in
  // the associated function.
1115   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1116     return indicatePessimisticFixpoint();
1117 
1118   // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it, we keep a record of potential new entries in a
  // separate map, NewRVsMap.
1122   decltype(ReturnedValues) NewRVsMap;
1123 
1124   auto HandleReturnValue = [&](Value *RV,
1125                                SmallSetVector<ReturnInst *, 4> &RIs) {
1126     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1127                       << RIs.size() << " RIs\n");
1128     CallBase *CB = dyn_cast<CallBase>(RV);
1129     if (!CB || UnresolvedCalls.count(CB))
1130       return;
1131 
1132     if (!CB->getCalledFunction()) {
1133       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1134                         << "\n");
1135       UnresolvedCalls.insert(CB);
1136       return;
1137     }
1138 
1139     // TODO: use the function scope once we have call site AAReturnedValues.
1140     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1141         *this, IRPosition::function(*CB->getCalledFunction()),
1142         DepClassTy::REQUIRED);
1143     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1144                       << RetValAA << "\n");
1145 
    // Skip dead ends: if we do not know anything about the returned call, we
    // mark it as unresolved and it will stay that way.
1148     if (!RetValAA.getState().isValidState()) {
1149       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1150                         << "\n");
1151       UnresolvedCalls.insert(CB);
1152       return;
1153     }
1154 
1155     // Do not try to learn partial information. If the callee has unresolved
1156     // return values we will treat the call as unresolved/opaque.
1157     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1158     if (!RetValAAUnresolvedCalls.empty()) {
1159       UnresolvedCalls.insert(CB);
1160       return;
1161     }
1162 
    // Now check if we can track transitively returned values. If possible,
    // i.e., if all return values can be represented in the current scope, do
    // so.
1165     bool Unresolved = false;
1166     for (auto &RetValAAIt : RetValAA.returned_values()) {
1167       Value *RetVal = RetValAAIt.first;
1168       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1169           isa<Constant>(RetVal))
1170         continue;
1171       // Anything that did not fit in the above categories cannot be resolved,
1172       // mark the call as unresolved.
1173       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1174                            "cannot be translated: "
1175                         << *RetVal << "\n");
1176       UnresolvedCalls.insert(CB);
1177       Unresolved = true;
1178       break;
1179     }
1180 
1181     if (Unresolved)
1182       return;
1183 
1184     // Now track transitively returned values.
1185     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1186     if (NumRetAA == RetValAA.getNumReturnValues()) {
1187       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1188                            "changed since it was seen last\n");
1189       return;
1190     }
1191     NumRetAA = RetValAA.getNumReturnValues();
1192 
1193     for (auto &RetValAAIt : RetValAA.returned_values()) {
1194       Value *RetVal = RetValAAIt.first;
1195       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1196         // Arguments are mapped to call site operands and we begin the traversal
1197         // again.
1198         bool Unused = false;
1199         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1200         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1201         continue;
1202       }
1203       if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time; there is
        // nothing for us to do here.
1206         continue;
1207       }
1208       if (isa<Constant>(RetVal)) {
1209         // Constants are valid everywhere, we can simply take them.
1210         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1211         continue;
1212       }
1213     }
1214   };
1215 
1216   for (auto &It : ReturnedValues)
1217     HandleReturnValue(It.first, It.second);
1218 
1219   // Because processing the new information can again lead to new return values
1220   // we have to be careful and iterate until this iteration is complete. The
1221   // idea is that we are in a stable state at the end of an update. All return
1222   // values have been handled and properly categorized. We might not update
1223   // again if we have not requested a non-fix attribute so we cannot "wait" for
1224   // the next update to analyze a new return value.
1225   while (!NewRVsMap.empty()) {
1226     auto It = std::move(NewRVsMap.back());
1227     NewRVsMap.pop_back();
1228 
1229     assert(!It.second.empty() && "Entry does not add anything.");
1230     auto &ReturnInsts = ReturnedValues[It.first];
1231     for (ReturnInst *RI : It.second)
1232       if (ReturnInsts.insert(RI)) {
1233         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1234                           << *It.first << " => " << *RI << "\n");
1235         HandleReturnValue(It.first, ReturnInsts);
1236         Changed = true;
1237       }
1238   }
1239 
1240   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1241   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1242 }
1243 
1244 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1245   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1246       : AAReturnedValuesImpl(IRP, A) {}
1247 
1248   /// See AbstractAttribute::trackStatistics()
1249   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1250 };
1251 
/// Returned values information for a call site.
1253 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1254   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1255       : AAReturnedValuesImpl(IRP, A) {}
1256 
1257   /// See AbstractAttribute::initialize(...).
1258   void initialize(Attributor &A) override {
1259     // TODO: Once we have call site specific value information we can provide
1260     //       call site specific liveness information and then it makes
1261     //       sense to specialize attributes for call sites instead of
1262     //       redirecting requests to the callee.
1263     llvm_unreachable("Abstract attributes for returned values are not "
1264                      "supported for call sites yet!");
1265   }
1266 
1267   /// See AbstractAttribute::updateImpl(...).
1268   ChangeStatus updateImpl(Attributor &A) override {
1269     return indicatePessimisticFixpoint();
1270   }
1271 
1272   /// See AbstractAttribute::trackStatistics()
1273   void trackStatistics() const override {}
1274 };
1275 
1276 /// ------------------------ NoSync Function Attribute -------------------------
1277 
1278 struct AANoSyncImpl : AANoSync {
1279   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1280 
1281   const std::string getAsStr() const override {
1282     return getAssumed() ? "nosync" : "may-sync";
1283   }
1284 
1285   /// See AbstractAttribute::updateImpl(...).
1286   ChangeStatus updateImpl(Attributor &A) override;
1287 
  /// Helper function used to determine whether an instruction is a non-relaxed
  /// atomic, i.e., an atomic instruction that does not have unordered or
  /// monotonic ordering.
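  /// (For example, "atomicrmw add ... seq_cst" is non-relaxed, while
  /// "atomicrmw add ... monotonic" is relaxed.)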
1291   static bool isNonRelaxedAtomic(Instruction *I);
1292 
1293   /// Helper function used to determine whether an instruction is volatile.
1294   static bool isVolatile(Instruction *I);
1295 
  /// Helper function used to check if an intrinsic is nosync. Currently only
  /// the memcpy, memmove, and memset intrinsics (and their element-wise atomic
  /// variants) are handled.
1298   static bool isNoSyncIntrinsic(Instruction *I);
1299 };
1300 
1301 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1302   if (!I->isAtomic())
1303     return false;
1304 
1305   AtomicOrdering Ordering;
1306   switch (I->getOpcode()) {
1307   case Instruction::AtomicRMW:
1308     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1309     break;
1310   case Instruction::Store:
1311     Ordering = cast<StoreInst>(I)->getOrdering();
1312     break;
1313   case Instruction::Load:
1314     Ordering = cast<LoadInst>(I)->getOrdering();
1315     break;
1316   case Instruction::Fence: {
1317     auto *FI = cast<FenceInst>(I);
1318     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1319       return false;
1320     Ordering = FI->getOrdering();
1321     break;
1322   }
1323   case Instruction::AtomicCmpXchg: {
1324     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1325     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the operation be treated as
    // relaxed. Otherwise it is non-relaxed.
1328     if (Success != AtomicOrdering::Unordered &&
1329         Success != AtomicOrdering::Monotonic)
1330       return true;
1331     if (Failure != AtomicOrdering::Unordered &&
1332         Failure != AtomicOrdering::Monotonic)
1333       return true;
1334     return false;
1335   }
1336   default:
1337     llvm_unreachable(
1338         "New atomic operations need to be known in the attributor.");
1339   }
1340 
1341   // Relaxed.
1342   if (Ordering == AtomicOrdering::Unordered ||
1343       Ordering == AtomicOrdering::Monotonic)
1344     return false;
1345   return true;
1346 }
1347 
1348 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
1350 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1351   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1352     switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered, therefore
    /// they are nosync.
1355     case Intrinsic::memset_element_unordered_atomic:
1356     case Intrinsic::memmove_element_unordered_atomic:
1357     case Intrinsic::memcpy_element_unordered_atomic:
1358       return true;
1359     case Intrinsic::memset:
1360     case Intrinsic::memmove:
1361     case Intrinsic::memcpy:
1362       if (!cast<MemIntrinsic>(II)->isVolatile())
1363         return true;
1364       return false;
1365     default:
1366       return false;
1367     }
1368   }
1369   return false;
1370 }
1371 
1372 bool AANoSyncImpl::isVolatile(Instruction *I) {
1373   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1374 
1375   switch (I->getOpcode()) {
1376   case Instruction::AtomicRMW:
1377     return cast<AtomicRMWInst>(I)->isVolatile();
1378   case Instruction::Store:
1379     return cast<StoreInst>(I)->isVolatile();
1380   case Instruction::Load:
1381     return cast<LoadInst>(I)->isVolatile();
1382   case Instruction::AtomicCmpXchg:
1383     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1384   default:
1385     return false;
1386   }
1387 }
1388 
1389 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1390 
1391   auto CheckRWInstForNoSync = [&](Instruction &I) {
1392     /// We are looking for volatile instructions or Non-Relaxed atomics.
1393     /// FIXME: We should improve the handling of intrinsics.
1394 
1395     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1396       return true;
1397 
1398     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1399       if (CB->hasFnAttr(Attribute::NoSync))
1400         return true;
1401 
1402       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1403           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1404       return NoSyncAA.isAssumedNoSync();
1405     }
1406 
1407     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1408       return true;
1409 
1410     return false;
1411   };
1412 
1413   auto CheckForNoSync = [&](Instruction &I) {
1414     // At this point we handled all read/write effects and they are all
1415     // nosync, so they can be skipped.
1416     if (I.mayReadOrWriteMemory())
1417       return true;
1418 
1419     // non-convergent and readnone imply nosync.
1420     return !cast<CallBase>(I).isConvergent();
1421   };
1422 
1423   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1424       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1425     return indicatePessimisticFixpoint();
1426 
1427   return ChangeStatus::UNCHANGED;
1428 }
1429 
1430 struct AANoSyncFunction final : public AANoSyncImpl {
1431   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1432       : AANoSyncImpl(IRP, A) {}
1433 
1434   /// See AbstractAttribute::trackStatistics()
1435   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1436 };
1437 
/// NoSync attribute deduction for a call site.
1439 struct AANoSyncCallSite final : AANoSyncImpl {
1440   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1441       : AANoSyncImpl(IRP, A) {}
1442 
1443   /// See AbstractAttribute::initialize(...).
1444   void initialize(Attributor &A) override {
1445     AANoSyncImpl::initialize(A);
1446     Function *F = getAssociatedFunction();
1447     if (!F || F->isDeclaration())
1448       indicatePessimisticFixpoint();
1449   }
1450 
1451   /// See AbstractAttribute::updateImpl(...).
1452   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1457     Function *F = getAssociatedFunction();
1458     const IRPosition &FnPos = IRPosition::function(*F);
1459     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1460     return clampStateAndIndicateChange(getState(), FnAA.getState());
1461   }
1462 
1463   /// See AbstractAttribute::trackStatistics()
1464   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1465 };
1466 
1467 /// ------------------------ No-Free Attributes ----------------------------
1468 
1469 struct AANoFreeImpl : public AANoFree {
1470   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1471 
1472   /// See AbstractAttribute::updateImpl(...).
1473   ChangeStatus updateImpl(Attributor &A) override {
1474     auto CheckForNoFree = [&](Instruction &I) {
1475       const auto &CB = cast<CallBase>(I);
1476       if (CB.hasFnAttr(Attribute::NoFree))
1477         return true;
1478 
1479       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1480           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1481       return NoFreeAA.isAssumedNoFree();
1482     };
1483 
1484     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1485       return indicatePessimisticFixpoint();
1486     return ChangeStatus::UNCHANGED;
1487   }
1488 
1489   /// See AbstractAttribute::getAsStr().
1490   const std::string getAsStr() const override {
1491     return getAssumed() ? "nofree" : "may-free";
1492   }
1493 };
1494 
1495 struct AANoFreeFunction final : public AANoFreeImpl {
1496   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1497       : AANoFreeImpl(IRP, A) {}
1498 
1499   /// See AbstractAttribute::trackStatistics()
1500   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1501 };
1502 
/// NoFree attribute deduction for a call site.
1504 struct AANoFreeCallSite final : AANoFreeImpl {
1505   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1506       : AANoFreeImpl(IRP, A) {}
1507 
1508   /// See AbstractAttribute::initialize(...).
1509   void initialize(Attributor &A) override {
1510     AANoFreeImpl::initialize(A);
1511     Function *F = getAssociatedFunction();
1512     if (!F || F->isDeclaration())
1513       indicatePessimisticFixpoint();
1514   }
1515 
1516   /// See AbstractAttribute::updateImpl(...).
1517   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1522     Function *F = getAssociatedFunction();
1523     const IRPosition &FnPos = IRPosition::function(*F);
1524     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1525     return clampStateAndIndicateChange(getState(), FnAA.getState());
1526   }
1527 
1528   /// See AbstractAttribute::trackStatistics()
1529   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1530 };
1531 
1532 /// NoFree attribute for floating values.
1533 struct AANoFreeFloating : AANoFreeImpl {
1534   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1535       : AANoFreeImpl(IRP, A) {}
1536 
1537   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }
1539 
  /// See AbstractAttribute::updateImpl(...).
1541   ChangeStatus updateImpl(Attributor &A) override {
1542     const IRPosition &IRP = getIRPosition();
1543 
1544     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1545         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1546     if (NoFreeAA.isAssumedNoFree())
1547       return ChangeStatus::UNCHANGED;
1548 
1549     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1550     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1551       Instruction *UserI = cast<Instruction>(U.getUser());
1552       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1553         if (CB->isBundleOperand(&U))
1554           return false;
1555         if (!CB->isArgOperand(&U))
1556           return true;
1557         unsigned ArgNo = CB->getArgOperandNo(&U);
1558 
1559         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1560             *this, IRPosition::callsite_argument(*CB, ArgNo),
1561             DepClassTy::REQUIRED);
1562         return NoFreeArg.isAssumedNoFree();
1563       }
1564 
1565       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1566           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1567         Follow = true;
1568         return true;
1569       }
1570       if (isa<ReturnInst>(UserI))
1571         return true;
1572 
1573       // Unknown user.
1574       return false;
1575     };
1576     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1577       return indicatePessimisticFixpoint();
1578 
1579     return ChangeStatus::UNCHANGED;
1580   }
1581 };
1582 
1583 /// NoFree attribute for a call site argument.
1584 struct AANoFreeArgument final : AANoFreeFloating {
1585   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1586       : AANoFreeFloating(IRP, A) {}
1587 
1588   /// See AbstractAttribute::trackStatistics()
1589   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1590 };
1591 
1592 /// NoFree attribute for call site arguments.
1593 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1594   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1595       : AANoFreeFloating(IRP, A) {}
1596 
1597   /// See AbstractAttribute::updateImpl(...).
1598   ChangeStatus updateImpl(Attributor &A) override {
1599     // TODO: Once we have call site specific value information we can provide
1600     //       call site specific liveness information and then it makes
1601     //       sense to specialize attributes for call sites arguments instead of
1602     //       redirecting requests to the callee argument.
1603     Argument *Arg = getAssociatedArgument();
1604     if (!Arg)
1605       return indicatePessimisticFixpoint();
1606     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1607     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1608     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1609   }
1610 
1611   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nofree)
  }
1613 };
1614 
1615 /// NoFree attribute for function return value.
1616 struct AANoFreeReturned final : AANoFreeFloating {
1617   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1618       : AANoFreeFloating(IRP, A) {
1619     llvm_unreachable("NoFree is not applicable to function returns!");
1620   }
1621 
1622   /// See AbstractAttribute::initialize(...).
1623   void initialize(Attributor &A) override {
1624     llvm_unreachable("NoFree is not applicable to function returns!");
1625   }
1626 
1627   /// See AbstractAttribute::updateImpl(...).
1628   ChangeStatus updateImpl(Attributor &A) override {
1629     llvm_unreachable("NoFree is not applicable to function returns!");
1630   }
1631 
1632   /// See AbstractAttribute::trackStatistics()
1633   void trackStatistics() const override {}
1634 };
1635 
1636 /// NoFree attribute deduction for a call site return value.
1637 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1638   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1639       : AANoFreeFloating(IRP, A) {}
1640 
1641   ChangeStatus manifest(Attributor &A) override {
1642     return ChangeStatus::UNCHANGED;
1643   }
1644   /// See AbstractAttribute::trackStatistics()
1645   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1646 };
1647 
1648 /// ------------------------ NonNull Argument Attribute ------------------------
1649 static int64_t getKnownNonNullAndDerefBytesForUse(
1650     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1651     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1652   TrackUse = false;
1653 
1654   const Value *UseV = U->get();
1655   if (!UseV->getType()->isPointerTy())
1656     return 0;
1657 
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We can try to be smart and, for now, avoid looking through
  // things we do not like, e.g., non-inbounds GEPs.
1661   if (isa<CastInst>(I)) {
1662     TrackUse = true;
1663     return 0;
1664   }
1665 
1666   if (isa<GetElementPtrInst>(I)) {
1667     TrackUse = true;
1668     return 0;
1669   }
1670 
1671   Type *PtrTy = UseV->getType();
1672   const Function *F = I->getFunction();
1673   bool NullPointerIsDefined =
1674       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1675   const DataLayout &DL = A.getInfoCache().getDL();
1676   if (const auto *CB = dyn_cast<CallBase>(I)) {
1677     if (CB->isBundleOperand(U)) {
1678       if (RetainedKnowledge RK = getKnowledgeFromUse(
1679               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1680         IsNonNull |=
1681             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1682         return RK.ArgValue;
1683       }
1684       return 0;
1685     }
1686 
1687     if (CB->isCallee(U)) {
1688       IsNonNull |= !NullPointerIsDefined;
1689       return 0;
1690     }
1691 
1692     unsigned ArgNo = CB->getArgOperandNo(U);
1693     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1694     // As long as we only use known information there is no need to track
1695     // dependences here.
1696     auto &DerefAA =
1697         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1698     IsNonNull |= DerefAA.isKnownNonNull();
1699     return DerefAA.getKnownDereferenceableBytes();
1700   }
1701 
1702   int64_t Offset;
1703   const Value *Base =
1704       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1705   if (Base) {
1706     if (Base == &AssociatedValue &&
1707         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1708       int64_t DerefBytes =
1709           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1710 
1711       IsNonNull |= !NullPointerIsDefined;
1712       return std::max(int64_t(0), DerefBytes);
1713     }
1714   }
1715 
1716   /// Corner case when an offset is 0.
1717   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1718                                               /*AllowNonInbounds*/ true);
1719   if (Base) {
1720     if (Offset == 0 && Base == &AssociatedValue &&
1721         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1722       int64_t DerefBytes =
1723           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1724       IsNonNull |= !NullPointerIsDefined;
1725       return std::max(int64_t(0), DerefBytes);
1726     }
1727   }
1728 
1729   return 0;
1730 }
1731 
1732 struct AANonNullImpl : AANonNull {
1733   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1734       : AANonNull(IRP, A),
1735         NullIsDefined(NullPointerIsDefined(
1736             getAnchorScope(),
1737             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1738 
1739   /// See AbstractAttribute::initialize(...).
1740   void initialize(Attributor &A) override {
1741     Value &V = getAssociatedValue();
1742     if (!NullIsDefined &&
1743         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1744                 /* IgnoreSubsumingPositions */ false, &A)) {
1745       indicateOptimisticFixpoint();
1746       return;
1747     }
1748 
1749     if (isa<ConstantPointerNull>(V)) {
1750       indicatePessimisticFixpoint();
1751       return;
1752     }
1753 
1754     AANonNull::initialize(A);
1755 
1756     bool CanBeNull = true;
1757     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) {
1758       if (!CanBeNull) {
1759         indicateOptimisticFixpoint();
1760         return;
1761       }
1762     }
1763 
1764     if (isa<GlobalValue>(&getAssociatedValue())) {
1765       indicatePessimisticFixpoint();
1766       return;
1767     }
1768 
1769     if (Instruction *CtxI = getCtxI())
1770       followUsesInMBEC(*this, A, getState(), *CtxI);
1771   }
1772 
1773   /// See followUsesInMBEC
1774   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1775                        AANonNull::StateType &State) {
1776     bool IsNonNull = false;
1777     bool TrackUse = false;
1778     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1779                                        IsNonNull, TrackUse);
1780     State.setKnown(IsNonNull);
1781     return TrackUse;
1782   }
1783 
1784   /// See AbstractAttribute::getAsStr().
1785   const std::string getAsStr() const override {
1786     return getAssumed() ? "nonnull" : "may-null";
1787   }
1788 
1789   /// Flag to determine if the underlying value can be null and still allow
1790   /// valid accesses.
1791   const bool NullIsDefined;
1792 };
1793 
1794 /// NonNull attribute for a floating value.
1795 struct AANonNullFloating : public AANonNullImpl {
1796   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1797       : AANonNullImpl(IRP, A) {}
1798 
1799   /// See AbstractAttribute::updateImpl(...).
1800   ChangeStatus updateImpl(Attributor &A) override {
1801     const DataLayout &DL = A.getDataLayout();
1802 
1803     DominatorTree *DT = nullptr;
1804     AssumptionCache *AC = nullptr;
1805     InformationCache &InfoCache = A.getInfoCache();
1806     if (const Function *Fn = getAnchorScope()) {
1807       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1808       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1809     }
1810 
1811     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1812                             AANonNull::StateType &T, bool Stripped) -> bool {
1813       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1814                                              DepClassTy::REQUIRED);
1815       if (!Stripped && this == &AA) {
1816         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1817           T.indicatePessimisticFixpoint();
1818       } else {
1819         // Use abstract attribute information.
1820         const AANonNull::StateType &NS = AA.getState();
1821         T ^= NS;
1822       }
1823       return T.isValidState();
1824     };
1825 
1826     StateType T;
1827     if (!genericValueTraversal<AANonNull, StateType>(
1828             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1829       return indicatePessimisticFixpoint();
1830 
1831     return clampStateAndIndicateChange(getState(), T);
1832   }
1833 
1834   /// See AbstractAttribute::trackStatistics()
1835   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1836 };
1837 
1838 /// NonNull attribute for function return value.
1839 struct AANonNullReturned final
1840     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1841   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1842       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1843 
1844   /// See AbstractAttribute::getAsStr().
1845   const std::string getAsStr() const override {
1846     return getAssumed() ? "nonnull" : "may-null";
1847   }
1848 
1849   /// See AbstractAttribute::trackStatistics()
1850   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1851 };
1852 
1853 /// NonNull attribute for function argument.
1854 struct AANonNullArgument final
1855     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1856   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1857       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1858 
1859   /// See AbstractAttribute::trackStatistics()
1860   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1861 };
1862 
1863 struct AANonNullCallSiteArgument final : AANonNullFloating {
1864   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1865       : AANonNullFloating(IRP, A) {}
1866 
1867   /// See AbstractAttribute::trackStatistics()
1868   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1869 };
1870 
1871 /// NonNull attribute for a call site return position.
1872 struct AANonNullCallSiteReturned final
1873     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1874   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1875       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1876 
1877   /// See AbstractAttribute::trackStatistics()
1878   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1879 };
1880 
1881 /// ------------------------ No-Recurse Attributes ----------------------------
1882 
1883 struct AANoRecurseImpl : public AANoRecurse {
1884   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1885 
1886   /// See AbstractAttribute::getAsStr()
1887   const std::string getAsStr() const override {
1888     return getAssumed() ? "norecurse" : "may-recurse";
1889   }
1890 };
1891 
1892 struct AANoRecurseFunction final : AANoRecurseImpl {
1893   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1894       : AANoRecurseImpl(IRP, A) {}
1895 
1896   /// See AbstractAttribute::initialize(...).
1897   void initialize(Attributor &A) override {
1898     AANoRecurseImpl::initialize(A);
1899     if (const Function *F = getAnchorScope())
1900       if (A.getInfoCache().getSccSize(*F) != 1)
1901         indicatePessimisticFixpoint();
1902   }
1903 
1904   /// See AbstractAttribute::updateImpl(...).
1905   ChangeStatus updateImpl(Attributor &A) override {
1906 
1907     // If all live call sites are known to be no-recurse, we are as well.
1908     auto CallSitePred = [&](AbstractCallSite ACS) {
1909       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1910           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1911           DepClassTy::NONE);
1912       return NoRecurseAA.isKnownNoRecurse();
1913     };
1914     bool AllCallSitesKnown;
1915     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1916       // If we know all call sites and all are known no-recurse, we are done.
1917       // If all known call sites, which might not be all that exist, are known
1918       // to be no-recurse, we are not done but we can continue to assume
1919       // no-recurse. If one of the call sites we have not visited will become
1920       // live, another update is triggered.
1921       if (AllCallSitesKnown)
1922         indicateOptimisticFixpoint();
1923       return ChangeStatus::UNCHANGED;
1924     }
1925 
1926     // If the above check does not hold anymore we look at the calls.
1927     auto CheckForNoRecurse = [&](Instruction &I) {
1928       const auto &CB = cast<CallBase>(I);
1929       if (CB.hasFnAttr(Attribute::NoRecurse))
1930         return true;
1931 
1932       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1933           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1934       if (!NoRecurseAA.isAssumedNoRecurse())
1935         return false;
1936 
1937       // Recursion to the same function
1938       if (CB.getCalledFunction() == getAnchorScope())
1939         return false;
1940 
1941       return true;
1942     };
1943 
1944     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1945       return indicatePessimisticFixpoint();
1946     return ChangeStatus::UNCHANGED;
1947   }
1948 
1949   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1950 };
1951 
/// NoRecurse attribute deduction for a call site.
1953 struct AANoRecurseCallSite final : AANoRecurseImpl {
1954   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1955       : AANoRecurseImpl(IRP, A) {}
1956 
1957   /// See AbstractAttribute::initialize(...).
1958   void initialize(Attributor &A) override {
1959     AANoRecurseImpl::initialize(A);
1960     Function *F = getAssociatedFunction();
1961     if (!F || F->isDeclaration())
1962       indicatePessimisticFixpoint();
1963   }
1964 
1965   /// See AbstractAttribute::updateImpl(...).
1966   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1971     Function *F = getAssociatedFunction();
1972     const IRPosition &FnPos = IRPosition::function(*F);
1973     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1974     return clampStateAndIndicateChange(getState(), FnAA.getState());
1975   }
1976 
1977   /// See AbstractAttribute::trackStatistics()
1978   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1979 };
1980 
1981 /// -------------------- Undefined-Behavior Attributes ------------------------
1982 
1983 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1984   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1985       : AAUndefinedBehavior(IRP, A) {}
1986 
  /// See AbstractAttribute::updateImpl(...).
  // We look for UB in memory accesses through a pointer, conditional
  // branches, call sites, and returned values.
1989   ChangeStatus updateImpl(Attributor &A) override {
1990     const size_t UBPrevSize = KnownUBInsts.size();
1991     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1992 
1993     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1994       // Skip instructions that are already saved.
1995       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1996         return true;
1997 
1998       // If we reach here, we know we have an instruction
1999       // that accesses memory through a pointer operand,
2000       // for which getPointerOperand() should give it to us.
2001       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
2002       assert(PtrOp &&
2003              "Expected pointer operand of memory accessing instruction");
2004 
2005       // Either we stopped and the appropriate action was taken,
2006       // or we got back a simplified value to continue.
2007       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2008       if (!SimplifiedPtrOp.hasValue())
2009         return true;
2010       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2011 
      // A memory access through a pointer is considered UB
      // only if the pointer has a constant null value.
      // TODO: Expand this to cover more than constant values.
2015       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2016         AssumedNoUBInsts.insert(&I);
2017         return true;
2018       }
2019       const Type *PtrTy = PtrOpVal->getType();
2020 
2021       // Because we only consider instructions inside functions,
2022       // assume that a parent function exists.
2023       const Function *F = I.getFunction();
2024 
      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
2027       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2028         AssumedNoUBInsts.insert(&I);
2029       else
2030         KnownUBInsts.insert(&I);
2031       return true;
2032     };
2033 
2034     auto InspectBrInstForUB = [&](Instruction &I) {
2035       // A conditional branch instruction is considered UB if it has `undef`
2036       // condition.
2037 
2038       // Skip instructions that are already saved.
2039       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2040         return true;
2041 
2042       // We know we have a branch instruction.
2043       auto BrInst = cast<BranchInst>(&I);
2044 
2045       // Unconditional branches are never considered UB.
2046       if (BrInst->isUnconditional())
2047         return true;
2048 
2049       // Either we stopped and the appropriate action was taken,
2050       // or we got back a simplified value to continue.
2051       Optional<Value *> SimplifiedCond =
2052           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2053       if (!SimplifiedCond.hasValue())
2054         return true;
2055       AssumedNoUBInsts.insert(&I);
2056       return true;
2057     };
2058 
2059     auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a call site always causes UB.
2061 
2062       // Skip instructions that are already saved.
2063       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2064         return true;
2065 
2066       // Check nonnull and noundef argument attribute violation for each
2067       // callsite.
2068       CallBase &CB = cast<CallBase>(I);
2069       Function *Callee = CB.getCalledFunction();
2070       if (!Callee)
2071         return true;
2072       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute, this
        // call site is considered UB.
2078         if (idx >= Callee->arg_size())
2079           break;
2080         Value *ArgVal = CB.getArgOperand(idx);
2081         if (!ArgVal)
2082           continue;
2083         // Here, we handle three cases.
2084         //   (1) Not having a value means it is dead. (we can replace the value
2085         //       with undef)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to a null pointer where it is known to be nonnull.
        //       The argument is a poison value and violates the noundef
        //       attribute.
2089         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2090         auto &NoUndefAA =
2091             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2092         if (!NoUndefAA.isKnownNoUndef())
2093           continue;
2094         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2095             *this, IRPosition::value(*ArgVal), DepClassTy::NONE);
2096         if (!ValueSimplifyAA.isKnown())
2097           continue;
2098         Optional<Value *> SimplifiedVal =
2099             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2100         if (!SimplifiedVal.hasValue() ||
2101             isa<UndefValue>(*SimplifiedVal.getValue())) {
2102           KnownUBInsts.insert(&I);
2103           continue;
2104         }
2105         if (!ArgVal->getType()->isPointerTy() ||
2106             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2107           continue;
2108         auto &NonNullAA =
2109             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2110         if (NonNullAA.isKnownNonNull())
2111           KnownUBInsts.insert(&I);
2112       }
2113       return true;
2114     };
2115 
2116     auto InspectReturnInstForUB =
2117         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
          // Check if a return instruction always causes UB.
2119           // Note: It is guaranteed that the returned position of the anchor
2120           //       scope has noundef attribute when this is called.
2121           //       We also ensure the return position is not "assumed dead"
2122           //       because the returned value was then potentially simplified to
2123           //       `undef` in AAReturnedValues without removing the `noundef`
2124           //       attribute yet.
2125 
          // When the returned position has the noundef attribute, UB occurs in
          // the following cases.
2128           //   (1) Returned value is known to be undef.
2129           //   (2) The value is known to be a null pointer and the returned
2130           //       position has nonnull attribute (because the returned value is
2131           //       poison).
2132           bool FoundUB = false;
2133           if (isa<UndefValue>(V)) {
2134             FoundUB = true;
2135           } else {
2136             if (isa<ConstantPointerNull>(V)) {
2137               auto &NonNullAA = A.getAAFor<AANonNull>(
2138                   *this, IRPosition::returned(*getAnchorScope()),
2139                   DepClassTy::NONE);
2140               if (NonNullAA.isKnownNonNull())
2141                 FoundUB = true;
2142             }
2143           }
2144 
2145           if (FoundUB)
2146             for (ReturnInst *RI : RetInsts)
2147               KnownUBInsts.insert(RI);
2148           return true;
2149         };
2150 
2151     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2152                               {Instruction::Load, Instruction::Store,
2153                                Instruction::AtomicCmpXchg,
2154                                Instruction::AtomicRMW},
2155                               /* CheckBBLivenessOnly */ true);
2156     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2157                               /* CheckBBLivenessOnly */ true);
2158     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2159 
    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
2162     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2163       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2164       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2165         auto &RetPosNoUndefAA =
2166             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2167         if (RetPosNoUndefAA.isKnownNoUndef())
2168           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2169                                                     *this);
2170       }
2171     }
2172 
2173     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2174         UBPrevSize != KnownUBInsts.size())
2175       return ChangeStatus::CHANGED;
2176     return ChangeStatus::UNCHANGED;
2177   }
2178 
2179   bool isKnownToCauseUB(Instruction *I) const override {
2180     return KnownUBInsts.count(I);
2181   }
2182 
2183   bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that includes
    // those in the KnownUBInsts set). The rest of the boilerplate is to ensure
    // that it is one of the instructions we test for UB.
2189 
2190     switch (I->getOpcode()) {
2191     case Instruction::Load:
2192     case Instruction::Store:
2193     case Instruction::AtomicCmpXchg:
2194     case Instruction::AtomicRMW:
2195       return !AssumedNoUBInsts.count(I);
2196     case Instruction::Br: {
2197       auto BrInst = cast<BranchInst>(I);
2198       if (BrInst->isUnconditional())
2199         return false;
2200       return !AssumedNoUBInsts.count(I);
2201     } break;
2202     default:
2203       return false;
2204     }
2205     return false;
2206   }
2207 
2208   ChangeStatus manifest(Attributor &A) override {
2209     if (KnownUBInsts.empty())
2210       return ChangeStatus::UNCHANGED;
2211     for (Instruction *I : KnownUBInsts)
2212       A.changeToUnreachableAfterManifest(I);
2213     return ChangeStatus::CHANGED;
2214   }
2215 
2216   /// See AbstractAttribute::getAsStr()
2217   const std::string getAsStr() const override {
2218     return getAssumed() ? "undefined-behavior" : "no-ub";
2219   }
2220 
2221   /// Note: The correctness of this analysis depends on the fact that the
2222   /// following 2 sets will stop changing after some point.
2223   /// "Change" here means that their size changes.
2224   /// The size of each set is monotonically increasing
2225   /// (we only add items to them) and it is upper bounded by the number of
2226   /// instructions in the processed function (we can never save more
2227   /// elements in either set than this number). Hence, at some point,
2228   /// they will stop increasing.
2229   /// Consequently, at some point, both sets will have stopped
2230   /// changing, effectively making the analysis reach a fixpoint.
2231 
2232   /// Note: These 2 sets are disjoint and an instruction can be considered
2233   /// one of 3 things:
2234   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2235   ///    the KnownUBInsts set.
2236   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2237   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2239   ///    could not find a reason to assume or prove that it can cause UB,
2240   ///    hence it assumes it doesn't. We have a set for these instructions
2241   ///    so that we don't reprocess them in every update.
2242   ///    Note however that instructions in this set may cause UB.
2243 
2244 protected:
2245   /// A set of all live instructions _known_ to cause UB.
2246   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2247 
2248 private:
2249   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2250   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2251 
  // Should be called on updates where we process an instruction \p I that
  // depends on a value \p V; one of the following has to happen:
2254   // - If the value is assumed, then stop.
2255   // - If the value is known but undef, then consider it UB.
2256   // - Otherwise, do specific processing with the simplified value.
2257   // We return None in the first 2 cases to signify that an appropriate
2258   // action was taken and the caller should stop.
2259   // Otherwise, we return the simplified value that the caller should
2260   // use for specific processing.
2261   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2262                                          Instruction *I) {
2263     const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2264         *this, IRPosition::value(*V), DepClassTy::REQUIRED);
2265     Optional<Value *> SimplifiedV =
2266         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2267     if (!ValueSimplifyAA.isKnown()) {
2268       // Don't depend on assumed values.
2269       return llvm::None;
2270     }
2271     if (!SimplifiedV.hasValue()) {
2272       // If it is known (which we tested above) but it doesn't have a value,
2273       // then we can assume `undef` and hence the instruction is UB.
2274       KnownUBInsts.insert(I);
2275       return llvm::None;
2276     }
2277     Value *Val = SimplifiedV.getValue();
2278     if (isa<UndefValue>(Val)) {
2279       KnownUBInsts.insert(I);
2280       return llvm::None;
2281     }
2282     return Val;
2283   }
2284 };
2285 
2286 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2287   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2288       : AAUndefinedBehaviorImpl(IRP, A) {}
2289 
2290   /// See AbstractAttribute::trackStatistics()
2291   void trackStatistics() const override {
2292     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2293                "Number of instructions known to have UB");
2294     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2295         KnownUBInsts.size();
2296   }
2297 };
2298 
2299 /// ------------------------ Will-Return Attributes ----------------------------
2300 
// Helper function that checks whether a function has any cycle for which we
// don't know whether it is bounded.
// Loops with a known maximum trip count are considered bounded; any other
// cycle is not.
2304 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2305   ScalarEvolution *SE =
2306       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2307   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function, we assume
  // any cycle to be unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm to find all maximal
  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2312   if (!SE || !LI) {
2313     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2314       if (SCCI.hasCycle())
2315         return true;
2316     return false;
2317   }
2318 
2319   // If there's irreducible control, the function may contain non-loop cycles.
2320   if (mayContainIrreducibleControl(F, LI))
2321     return true;
2322 
  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
2324   for (auto *L : LI->getLoopsInPreorder()) {
2325     if (!SE->getSmallConstantMaxTripCount(L))
2326       return true;
2327   }
2328   return false;
2329 }
2330 
2331 struct AAWillReturnImpl : public AAWillReturn {
2332   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2333       : AAWillReturn(IRP, A) {}
2334 
2335   /// See AbstractAttribute::initialize(...).
2336   void initialize(Attributor &A) override {
2337     AAWillReturn::initialize(A);
2338 
2339     Function *F = getAnchorScope();
2340     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2341       indicatePessimisticFixpoint();
2342   }
2343 
2344   /// See AbstractAttribute::updateImpl(...).
2345   ChangeStatus updateImpl(Attributor &A) override {
2346     auto CheckForWillReturn = [&](Instruction &I) {
2347       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2348       const auto &WillReturnAA =
2349           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2350       if (WillReturnAA.isKnownWillReturn())
2351         return true;
2352       if (!WillReturnAA.isAssumedWillReturn())
2353         return false;
2354       const auto &NoRecurseAA =
2355           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2356       return NoRecurseAA.isAssumedNoRecurse();
2357     };
2358 
2359     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2360       return indicatePessimisticFixpoint();
2361 
2362     return ChangeStatus::UNCHANGED;
2363   }
2364 
2365   /// See AbstractAttribute::getAsStr()
2366   const std::string getAsStr() const override {
2367     return getAssumed() ? "willreturn" : "may-noreturn";
2368   }
2369 };
2370 
2371 struct AAWillReturnFunction final : AAWillReturnImpl {
2372   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2373       : AAWillReturnImpl(IRP, A) {}
2374 
2375   /// See AbstractAttribute::trackStatistics()
2376   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2377 };
2378 
/// WillReturn attribute deduction for a call site.
2380 struct AAWillReturnCallSite final : AAWillReturnImpl {
2381   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2382       : AAWillReturnImpl(IRP, A) {}
2383 
2384   /// See AbstractAttribute::initialize(...).
2385   void initialize(Attributor &A) override {
2386     AAWillReturn::initialize(A);
2387     Function *F = getAssociatedFunction();
2388     if (!F || !A.isFunctionIPOAmendable(*F))
2389       indicatePessimisticFixpoint();
2390   }
2391 
2392   /// See AbstractAttribute::updateImpl(...).
2393   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
2398     Function *F = getAssociatedFunction();
2399     const IRPosition &FnPos = IRPosition::function(*F);
2400     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2401     return clampStateAndIndicateChange(getState(), FnAA.getState());
2402   }
2403 
2404   /// See AbstractAttribute::trackStatistics()
2405   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2406 };
2407 
2408 /// -------------------AAReachability Attribute--------------------------
2409 
2410 struct AAReachabilityImpl : AAReachability {
2411   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2412       : AAReachability(IRP, A) {}
2413 
2414   const std::string getAsStr() const override {
2415     // TODO: Return the number of reachable queries.
2416     return "reachable";
2417   }
2418 
2419   /// See AbstractAttribute::initialize(...).
2420   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2421 
2422   /// See AbstractAttribute::updateImpl(...).
2423   ChangeStatus updateImpl(Attributor &A) override {
2424     return indicatePessimisticFixpoint();
2425   }
2426 };
2427 
2428 struct AAReachabilityFunction final : public AAReachabilityImpl {
2429   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2430       : AAReachabilityImpl(IRP, A) {}
2431 
2432   /// See AbstractAttribute::trackStatistics()
2433   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2434 };
2435 
2436 /// ------------------------ NoAlias Argument Attribute ------------------------
2437 
2438 struct AANoAliasImpl : AANoAlias {
2439   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2440     assert(getAssociatedType()->isPointerTy() &&
2441            "Noalias is a pointer attribute");
2442   }
2443 
2444   const std::string getAsStr() const override {
2445     return getAssumed() ? "noalias" : "may-alias";
2446   }
2447 };
2448 
2449 /// NoAlias attribute for a floating value.
2450 struct AANoAliasFloating final : AANoAliasImpl {
2451   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2452       : AANoAliasImpl(IRP, A) {}
2453 
2454   /// See AbstractAttribute::initialize(...).
2455   void initialize(Attributor &A) override {
2456     AANoAliasImpl::initialize(A);
2457     Value *Val = &getAssociatedValue();
2458     do {
2459       CastInst *CI = dyn_cast<CastInst>(Val);
2460       if (!CI)
2461         break;
2462       Value *Base = CI->getOperand(0);
2463       if (!Base->hasOneUse())
2464         break;
2465       Val = Base;
2466     } while (true);
2467 
2468     if (!Val->getType()->isPointerTy()) {
2469       indicatePessimisticFixpoint();
2470       return;
2471     }
2472 
2473     if (isa<AllocaInst>(Val))
2474       indicateOptimisticFixpoint();
2475     else if (isa<ConstantPointerNull>(Val) &&
2476              !NullPointerIsDefined(getAnchorScope(),
2477                                    Val->getType()->getPointerAddressSpace()))
2478       indicateOptimisticFixpoint();
2479     else if (Val != &getAssociatedValue()) {
2480       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2481           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2482       if (ValNoAliasAA.isKnownNoAlias())
2483         indicateOptimisticFixpoint();
2484     }
2485   }
2486 
2487   /// See AbstractAttribute::updateImpl(...).
2488   ChangeStatus updateImpl(Attributor &A) override {
2489     // TODO: Implement this.
2490     return indicatePessimisticFixpoint();
2491   }
2492 
2493   /// See AbstractAttribute::trackStatistics()
2494   void trackStatistics() const override {
2495     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2496   }
2497 };
2498 
2499 /// NoAlias attribute for an argument.
2500 struct AANoAliasArgument final
2501     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2502   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2503   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2504 
2505   /// See AbstractAttribute::initialize(...).
2506   void initialize(Attributor &A) override {
2507     Base::initialize(A);
2508     // See callsite argument attribute and callee argument attribute.
2509     if (hasAttr({Attribute::ByVal}))
2510       indicateOptimisticFixpoint();
2511   }
2512 
2513   /// See AbstractAttribute::update(...).
2514   ChangeStatus updateImpl(Attributor &A) override {
2515     // We have to make sure no-alias on the argument does not break
2516     // synchronization when this is a callback argument, see also [1] below.
2517     // If synchronization cannot be affected, we delegate to the base updateImpl
2518     // function, otherwise we give up for now.
2519 
2520     // If the function is no-sync, no-alias cannot break synchronization.
2521     const auto &NoSyncAA =
2522         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2523                              DepClassTy::OPTIONAL);
2524     if (NoSyncAA.isAssumedNoSync())
2525       return Base::updateImpl(A);
2526 
2527     // If the argument is read-only, no-alias cannot break synchronization.
2528     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2529         *this, getIRPosition(), DepClassTy::OPTIONAL);
2530     if (MemBehaviorAA.isAssumedReadOnly())
2531       return Base::updateImpl(A);
2532 
2533     // If the argument is never passed through callbacks, no-alias cannot break
2534     // synchronization.
2535     bool AllCallSitesKnown;
2536     if (A.checkForAllCallSites(
2537             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2538             true, AllCallSitesKnown))
2539       return Base::updateImpl(A);
2540 
2541     // TODO: add no-alias but make sure it doesn't break synchronization by
2542     // introducing fake uses. See:
2543     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2544     //     International Workshop on OpenMP 2018,
2545     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2546 
2547     return indicatePessimisticFixpoint();
2548   }
2549 
2550   /// See AbstractAttribute::trackStatistics()
2551   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2552 };
2553 
2554 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2555   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2556       : AANoAliasImpl(IRP, A) {}
2557 
2558   /// See AbstractAttribute::initialize(...).
2559   void initialize(Attributor &A) override {
2560     // See callsite argument attribute and callee argument attribute.
2561     const auto &CB = cast<CallBase>(getAnchorValue());
2562     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2563       indicateOptimisticFixpoint();
2564     Value &Val = getAssociatedValue();
2565     if (isa<ConstantPointerNull>(Val) &&
2566         !NullPointerIsDefined(getAnchorScope(),
2567                               Val.getType()->getPointerAddressSpace()))
2568       indicateOptimisticFixpoint();
2569   }
2570 
  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
2573   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2574                             const AAMemoryBehavior &MemBehaviorAA,
2575                             const CallBase &CB, unsigned OtherArgNo) {
2576     // We do not need to worry about aliasing with the underlying IRP.
2577     if (this->getCalleeArgNo() == (int)OtherArgNo)
2578       return false;
2579 
2580     // If it is not a pointer or pointer vector we do not alias.
2581     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2582     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2583       return false;
2584 
2585     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2586         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2587 
2588     // If the argument is readnone, there is no read-write aliasing.
2589     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2590       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2591       return false;
2592     }
2593 
2594     // If the argument is readonly and the underlying value is readonly, there
2595     // is no read-write aliasing.
2596     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2597     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2598       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2599       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2600       return false;
2601     }
2602 
2603     // We have to utilize actual alias analysis queries so we need the object.
2604     if (!AAR)
2605       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2606 
2607     // Try to rule it out at the call site.
2608     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2609     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2610                          "callsite arguments: "
2611                       << getAssociatedValue() << " " << *ArgOp << " => "
2612                       << (IsAliasing ? "" : "no-") << "alias \n");
2613 
2614     return IsAliasing;
2615   }
2616 
2617   bool
2618   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2619                                          const AAMemoryBehavior &MemBehaviorAA,
2620                                          const AANoAlias &NoAliasAA) {
2621     // We can deduce "noalias" if the following conditions hold.
2622     // (i)   Associated value is assumed to be noalias in the definition.
2623     // (ii)  Associated value is assumed to be no-capture in all the uses
2624     //       possibly executed before this callsite.
2625     // (iii) There is no other pointer argument which could alias with the
2626     //       value.
2627 
2628     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2629     if (!AssociatedValueIsNoAliasAtDef) {
2630       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2631                         << " is not no-alias at the definition\n");
2632       return false;
2633     }
2634 
2635     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2636 
2637     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2638     const Function *ScopeFn = VIRP.getAnchorScope();
2639     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
    // Check whether the value is captured in the scope using AANoCapture.
    // Look at the CFG and check only uses possibly executed before this
    // call site.
2643     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2644       Instruction *UserI = cast<Instruction>(U.getUser());
2645 
      // If UserI is the current instruction and there is a single potential
      // use of the value in UserI, we allow the use.
2648       // TODO: We should inspect the operands and allow those that cannot alias
2649       //       with the value.
2650       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2651         return true;
2652 
2653       if (ScopeFn) {
2654         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2655             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2656 
2657         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2658           return true;
2659 
2660         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2661           if (CB->isArgOperand(&U)) {
2662 
2663             unsigned ArgNo = CB->getArgOperandNo(&U);
2664 
2665             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2666                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2667                 DepClassTy::OPTIONAL);
2668 
2669             if (NoCaptureAA.isAssumedNoCapture())
2670               return true;
2671           }
2672         }
2673       }
2674 
2675       // For cases which can potentially have more users
2676       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2677           isa<SelectInst>(U)) {
2678         Follow = true;
2679         return true;
2680       }
2681 
2682       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2683       return false;
2684     };
2685 
2686     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2687       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2688         LLVM_DEBUG(
2689             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2690                    << " cannot be noalias as it is potentially captured\n");
2691         return false;
2692       }
2693     }
2694     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2695 
2696     // Check there is no other pointer argument which could alias with the
2697     // value passed at this call site.
2698     // TODO: AbstractCallSite
2699     const auto &CB = cast<CallBase>(getAnchorValue());
2700     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2701          OtherArgNo++)
2702       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2703         return false;
2704 
2705     return true;
2706   }
2707 
2708   /// See AbstractAttribute::updateImpl(...).
2709   ChangeStatus updateImpl(Attributor &A) override {
2710     // If the argument is readnone we are done as there are no accesses via the
2711     // argument.
2712     auto &MemBehaviorAA =
2713         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2714     if (MemBehaviorAA.isAssumedReadNone()) {
2715       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2716       return ChangeStatus::UNCHANGED;
2717     }
2718 
2719     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2720     const auto &NoAliasAA =
2721         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2722 
2723     AAResults *AAR = nullptr;
2724     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2725                                                NoAliasAA)) {
2726       LLVM_DEBUG(
2727           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2728       return ChangeStatus::UNCHANGED;
2729     }
2730 
2731     return indicatePessimisticFixpoint();
2732   }
2733 
2734   /// See AbstractAttribute::trackStatistics()
2735   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2736 };
2737 
2738 /// NoAlias attribute for function return value.
2739 struct AANoAliasReturned final : AANoAliasImpl {
2740   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2741       : AANoAliasImpl(IRP, A) {}
2742 
2743   /// See AbstractAttribute::initialize(...).
2744   void initialize(Attributor &A) override {
2745     AANoAliasImpl::initialize(A);
2746     Function *F = getAssociatedFunction();
2747     if (!F || F->isDeclaration())
2748       indicatePessimisticFixpoint();
2749   }
2750 
2751   /// See AbstractAttribute::updateImpl(...).
2752   virtual ChangeStatus updateImpl(Attributor &A) override {
2753 
2754     auto CheckReturnValue = [&](Value &RV) -> bool {
2755       if (Constant *C = dyn_cast<Constant>(&RV))
2756         if (C->isNullValue() || isa<UndefValue>(C))
2757           return true;
2758 
      // For now, we can only deduce noalias if we have call sites.
      // FIXME: add more support.
2761       if (!isa<CallBase>(&RV))
2762         return false;
2763 
2764       const IRPosition &RVPos = IRPosition::value(RV);
2765       const auto &NoAliasAA =
2766           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2767       if (!NoAliasAA.isAssumedNoAlias())
2768         return false;
2769 
2770       const auto &NoCaptureAA =
2771           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2772       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2773     };
2774 
2775     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2776       return indicatePessimisticFixpoint();
2777 
2778     return ChangeStatus::UNCHANGED;
2779   }
2780 
2781   /// See AbstractAttribute::trackStatistics()
2782   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2783 };
2784 
2785 /// NoAlias attribute deduction for a call site return value.
2786 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2787   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2788       : AANoAliasImpl(IRP, A) {}
2789 
2790   /// See AbstractAttribute::initialize(...).
2791   void initialize(Attributor &A) override {
2792     AANoAliasImpl::initialize(A);
2793     Function *F = getAssociatedFunction();
2794     if (!F || F->isDeclaration())
2795       indicatePessimisticFixpoint();
2796   }
2797 
2798   /// See AbstractAttribute::updateImpl(...).
2799   ChangeStatus updateImpl(Attributor &A) override {
2800     // TODO: Once we have call site specific value information we can provide
2801     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2803     //       redirecting requests to the callee argument.
2804     Function *F = getAssociatedFunction();
2805     const IRPosition &FnPos = IRPosition::returned(*F);
2806     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2807     return clampStateAndIndicateChange(getState(), FnAA.getState());
2808   }
2809 
2810   /// See AbstractAttribute::trackStatistics()
2811   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2812 };
2813 
2814 /// -------------------AAIsDead Function Attribute-----------------------
2815 
2816 struct AAIsDeadValueImpl : public AAIsDead {
2817   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2818 
2819   /// See AAIsDead::isAssumedDead().
2820   bool isAssumedDead() const override { return getAssumed(); }
2821 
2822   /// See AAIsDead::isKnownDead().
2823   bool isKnownDead() const override { return getKnown(); }
2824 
2825   /// See AAIsDead::isAssumedDead(BasicBlock *).
2826   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2827 
2828   /// See AAIsDead::isKnownDead(BasicBlock *).
2829   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2830 
2831   /// See AAIsDead::isAssumedDead(Instruction *I).
2832   bool isAssumedDead(const Instruction *I) const override {
2833     return I == getCtxI() && isAssumedDead();
2834   }
2835 
2836   /// See AAIsDead::isKnownDead(Instruction *I).
2837   bool isKnownDead(const Instruction *I) const override {
2838     return isAssumedDead(I) && getKnown();
2839   }
2840 
2841   /// See AbstractAttribute::getAsStr().
2842   const std::string getAsStr() const override {
2843     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2844   }
2845 
2846   /// Check if all uses are assumed dead.
2847   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
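    // The predicate rejects every use, so the check below succeeds only if all
    // uses of \p V are assumed dead by the liveness information.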
2848     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2849     // Explicitly set the dependence class to required because we want a long
2850     // chain of N dependent instructions to be considered live as soon as one is
2851     // without going through N update cycles. This is not required for
2852     // correctness.
2853     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2854   }
2855 
2856   /// Determine if \p I is assumed to be side-effect free.
2857   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2858     if (!I || wouldInstructionBeTriviallyDead(I))
2859       return true;
2860 
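    // Anything that is not a non-intrinsic call and not trivially dead is
    // conservatively assumed to have side effects.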
2861     auto *CB = dyn_cast<CallBase>(I);
2862     if (!CB || isa<IntrinsicInst>(CB))
2863       return false;
2864 
2865     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2866     const auto &NoUnwindAA =
2867         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2868     if (!NoUnwindAA.isAssumedNoUnwind())
2869       return false;
2870     if (!NoUnwindAA.isKnownNoUnwind())
2871       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2872 
2873     const auto &MemBehaviorAA =
2874         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2875     if (MemBehaviorAA.isAssumedReadOnly()) {
2876       if (!MemBehaviorAA.isKnownReadOnly())
2877         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2878       return true;
2879     }
2880     return false;
2881   }
2882 };
2883 
2884 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2885   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2886       : AAIsDeadValueImpl(IRP, A) {}
2887 
2888   /// See AbstractAttribute::initialize(...).
2889   void initialize(Attributor &A) override {
2890     if (isa<UndefValue>(getAssociatedValue())) {
2891       indicatePessimisticFixpoint();
2892       return;
2893     }
2894 
2895     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2896     if (!isAssumedSideEffectFree(A, I))
2897       indicatePessimisticFixpoint();
2898   }
2899 
2900   /// See AbstractAttribute::updateImpl(...).
2901   ChangeStatus updateImpl(Attributor &A) override {
2902     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2903     if (!isAssumedSideEffectFree(A, I))
2904       return indicatePessimisticFixpoint();
2905 
2906     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2907       return indicatePessimisticFixpoint();
2908     return ChangeStatus::UNCHANGED;
2909   }
2910 
2911   /// See AbstractAttribute::manifest(...).
2912   ChangeStatus manifest(Attributor &A) override {
2913     Value &V = getAssociatedValue();
2914     if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we basically know the users are all dead. We re-check
      // isAssumedSideEffectFree because it might no longer hold, in which case
      // only the users are dead but the instruction (=call) itself is still
      // needed.
2919       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2920         A.deleteAfterManifest(*I);
2921         return ChangeStatus::CHANGED;
2922       }
2923     }
2924     if (V.use_empty())
2925       return ChangeStatus::UNCHANGED;
2926 
2927     bool UsedAssumedInformation = false;
2928     Optional<Constant *> C =
2929         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2930     if (C.hasValue() && C.getValue())
2931       return ChangeStatus::UNCHANGED;
2932 
2933     // Replace the value with undef as it is dead but keep droppable uses around
2934     // as they provide information we don't want to give up on just yet.
2935     UndefValue &UV = *UndefValue::get(V.getType());
2936     bool AnyChange =
        A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
2938     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2939   }
2940 
2941   /// See AbstractAttribute::trackStatistics()
2942   void trackStatistics() const override {
2943     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2944   }
2945 };
2946 
2947 struct AAIsDeadArgument : public AAIsDeadFloating {
2948   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2949       : AAIsDeadFloating(IRP, A) {}
2950 
2951   /// See AbstractAttribute::initialize(...).
2952   void initialize(Attributor &A) override {
2953     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2954       indicatePessimisticFixpoint();
2955   }
2956 
2957   /// See AbstractAttribute::manifest(...).
2958   ChangeStatus manifest(Attributor &A) override {
2959     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2960     Argument &Arg = *getAssociatedArgument();
2961     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2962       if (A.registerFunctionSignatureRewrite(
2963               Arg, /* ReplacementTypes */ {},
2964               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2965               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2966         Arg.dropDroppableUses();
2967         return ChangeStatus::CHANGED;
2968       }
2969     return Changed;
2970   }
2971 
2972   /// See AbstractAttribute::trackStatistics()
2973   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2974 };
2975 
2976 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2977   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2978       : AAIsDeadValueImpl(IRP, A) {}
2979 
2980   /// See AbstractAttribute::initialize(...).
2981   void initialize(Attributor &A) override {
2982     if (isa<UndefValue>(getAssociatedValue()))
2983       indicatePessimisticFixpoint();
2984   }
2985 
2986   /// See AbstractAttribute::updateImpl(...).
2987   ChangeStatus updateImpl(Attributor &A) override {
2988     // TODO: Once we have call site specific value information we can provide
2989     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2991     //       redirecting requests to the callee argument.
2992     Argument *Arg = getAssociatedArgument();
2993     if (!Arg)
2994       return indicatePessimisticFixpoint();
2995     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2996     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2997     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2998   }
2999 
3000   /// See AbstractAttribute::manifest(...).
3001   ChangeStatus manifest(Attributor &A) override {
3002     CallBase &CB = cast<CallBase>(getAnchorValue());
3003     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3004     assert(!isa<UndefValue>(U.get()) &&
3005            "Expected undef values to be filtered out!");
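    // The associated argument is dead, so the value passed at this call site
    // is irrelevant and the operand can be replaced by undef.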
3006     UndefValue &UV = *UndefValue::get(U->getType());
3007     if (A.changeUseAfterManifest(U, UV))
3008       return ChangeStatus::CHANGED;
3009     return ChangeStatus::UNCHANGED;
3010   }
3011 
3012   /// See AbstractAttribute::trackStatistics()
3013   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3014 };
3015 
3016 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3017   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3018       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3019 
3020   /// See AAIsDead::isAssumedDead().
3021   bool isAssumedDead() const override {
3022     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3023   }
3024 
3025   /// See AbstractAttribute::initialize(...).
3026   void initialize(Attributor &A) override {
3027     if (isa<UndefValue>(getAssociatedValue())) {
3028       indicatePessimisticFixpoint();
3029       return;
3030     }
3031 
3032     // We track this separately as a secondary state.
3033     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3034   }
3035 
3036   /// See AbstractAttribute::updateImpl(...).
3037   ChangeStatus updateImpl(Attributor &A) override {
3038     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3039     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3040       IsAssumedSideEffectFree = false;
3041       Changed = ChangeStatus::CHANGED;
3042     }
3043 
3044     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3045       return indicatePessimisticFixpoint();
3046     return Changed;
3047   }
3048 
3049   /// See AbstractAttribute::trackStatistics()
3050   void trackStatistics() const override {
3051     if (IsAssumedSideEffectFree)
3052       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3053     else
3054       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3055   }
3056 
3057   /// See AbstractAttribute::getAsStr().
3058   const std::string getAsStr() const override {
3059     return isAssumedDead()
3060                ? "assumed-dead"
3061                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3062   }
3063 
3064 private:
3065   bool IsAssumedSideEffectFree;
3066 };
3067 
3068 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3069   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3070       : AAIsDeadValueImpl(IRP, A) {}
3071 
3072   /// See AbstractAttribute::updateImpl(...).
3073   ChangeStatus updateImpl(Attributor &A) override {
3074 
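    // The callback trivially succeeds; iterating the return instructions via
    // checkForAllInstructions records dependences on their liveness so this
    // attribute is updated when that changes.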
3075     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3076                               {Instruction::Ret});
3077 
3078     auto PredForCallSite = [&](AbstractCallSite ACS) {
3079       if (ACS.isCallbackCall() || !ACS.getInstruction())
3080         return false;
3081       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3082     };
3083 
3084     bool AllCallSitesKnown;
3085     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3086                                 AllCallSitesKnown))
3087       return indicatePessimisticFixpoint();
3088 
3089     return ChangeStatus::UNCHANGED;
3090   }
3091 
3092   /// See AbstractAttribute::manifest(...).
3093   ChangeStatus manifest(Attributor &A) override {
3094     // TODO: Rewrite the signature to return void?
3095     bool AnyChange = false;
3096     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3097     auto RetInstPred = [&](Instruction &I) {
3098       ReturnInst &RI = cast<ReturnInst>(I);
3099       if (!isa<UndefValue>(RI.getReturnValue()))
3100         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3101       return true;
3102     };
3103     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3104     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3105   }
3106 
3107   /// See AbstractAttribute::trackStatistics()
3108   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3109 };
3110 
3111 struct AAIsDeadFunction : public AAIsDead {
3112   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3113 
3114   /// See AbstractAttribute::initialize(...).
3115   void initialize(Attributor &A) override {
3116     const Function *F = getAnchorScope();
3117     if (F && !F->isDeclaration()) {
3118       // We only want to compute liveness once. If the function is not part of
3119       // the SCC, skip it.
3120       if (A.isRunOn(*const_cast<Function *>(F))) {
3121         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3122         assumeLive(A, F->getEntryBlock());
3123       } else {
3124         indicatePessimisticFixpoint();
3125       }
3126     }
3127   }
3128 
3129   /// See AbstractAttribute::getAsStr().
3130   const std::string getAsStr() const override {
3131     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3132            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3133            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3134            std::to_string(KnownDeadEnds.size()) + "]";
3135   }
3136 
3137   /// See AbstractAttribute::manifest(...).
3138   ChangeStatus manifest(Attributor &A) override {
3139     assert(getState().isValidState() &&
3140            "Attempted to manifest an invalid state!");
3141 
3142     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3143     Function &F = *getAnchorScope();
3144 
3145     if (AssumedLiveBlocks.empty()) {
3146       A.deleteAfterManifest(F);
3147       return ChangeStatus::CHANGED;
3148     }
3149 
    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
3153     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3154 
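    // Also treat the not-yet-explored points as dead ends here; they are
    // assumed to not transfer control to their successors.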
3155     KnownDeadEnds.set_union(ToBeExploredFrom);
3156     for (const Instruction *DeadEndI : KnownDeadEnds) {
3157       auto *CB = dyn_cast<CallBase>(DeadEndI);
3158       if (!CB)
3159         continue;
3160       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3161           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3162       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3163       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3164         continue;
3165 
3166       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3167         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3168       else
3169         A.changeToUnreachableAfterManifest(
3170             const_cast<Instruction *>(DeadEndI->getNextNode()));
3171       HasChanged = ChangeStatus::CHANGED;
3172     }
3173 
3174     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3175     for (BasicBlock &BB : F)
3176       if (!AssumedLiveBlocks.count(&BB)) {
3177         A.deleteAfterManifest(BB);
3178         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3179       }
3180 
3181     return HasChanged;
3182   }
3183 
3184   /// See AbstractAttribute::updateImpl(...).
3185   ChangeStatus updateImpl(Attributor &A) override;
3186 
3187   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3188     return !AssumedLiveEdges.count(std::make_pair(From, To));
3189   }
3190 
3191   /// See AbstractAttribute::trackStatistics()
3192   void trackStatistics() const override {}
3193 
3194   /// Returns true if the function is assumed dead.
3195   bool isAssumedDead() const override { return false; }
3196 
3197   /// See AAIsDead::isKnownDead().
3198   bool isKnownDead() const override { return false; }
3199 
3200   /// See AAIsDead::isAssumedDead(BasicBlock *).
3201   bool isAssumedDead(const BasicBlock *BB) const override {
3202     assert(BB->getParent() == getAnchorScope() &&
3203            "BB must be in the same anchor scope function.");
3204 
3205     if (!getAssumed())
3206       return false;
3207     return !AssumedLiveBlocks.count(BB);
3208   }
3209 
3210   /// See AAIsDead::isKnownDead(BasicBlock *).
3211   bool isKnownDead(const BasicBlock *BB) const override {
3212     return getKnown() && isAssumedDead(BB);
3213   }
3214 
  /// See AAIsDead::isAssumedDead(Instruction *I).
3216   bool isAssumedDead(const Instruction *I) const override {
3217     assert(I->getParent()->getParent() == getAnchorScope() &&
3218            "Instruction must be in the same anchor scope function.");
3219 
3220     if (!getAssumed())
3221       return false;
3222 
    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
3225     if (!AssumedLiveBlocks.count(I->getParent()))
3226       return true;
3227 
3228     // If it is not after a liveness barrier it is live.
3229     const Instruction *PrevI = I->getPrevNode();
3230     while (PrevI) {
3231       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3232         return true;
3233       PrevI = PrevI->getPrevNode();
3234     }
3235     return false;
3236   }
3237 
3238   /// See AAIsDead::isKnownDead(Instruction *I).
3239   bool isKnownDead(const Instruction *I) const override {
3240     return getKnown() && isAssumedDead(I);
3241   }
3242 
  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
3245   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3246     if (!AssumedLiveBlocks.insert(&BB).second)
3247       return false;
3248 
3249     // We assume that all of BB is (probably) live now and if there are calls to
3250     // internal functions we will assume that those are now live as well. This
3251     // is a performance optimization for blocks with calls to a lot of internal
3252     // functions. It can however cause dead functions to be treated as live.
3253     for (const Instruction &I : BB)
3254       if (const auto *CB = dyn_cast<CallBase>(&I))
3255         if (const Function *F = CB->getCalledFunction())
3256           if (F->hasLocalLinkage())
3257             A.markLiveInternalFunction(*F);
3258     return true;
3259   }
3260 
3261   /// Collection of instructions that need to be explored again, e.g., we
3262   /// did assume they do not transfer control to (one of their) successors.
3263   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3264 
3265   /// Collection of instructions that are known to not transfer control.
3266   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3267 
3268   /// Collection of all assumed live edges
3269   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3270 
3271   /// Collection of all assumed live BasicBlocks.
3272   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3273 };
3274 
3275 static bool
3276 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3277                         AbstractAttribute &AA,
3278                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3279   const IRPosition &IPos = IRPosition::callsite_function(CB);
3280 
3281   const auto &NoReturnAA =
3282       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3283   if (NoReturnAA.isAssumedNoReturn())
3284     return !NoReturnAA.isKnownNoReturn();
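  // The call may return; execution continues in the single successor block for
  // terminator calls and at the next instruction otherwise.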
3285   if (CB.isTerminator())
3286     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3287   else
3288     AliveSuccessors.push_back(CB.getNextNode());
3289   return false;
3290 }
3291 
3292 static bool
3293 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3294                         AbstractAttribute &AA,
3295                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3296   bool UsedAssumedInformation =
3297       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3298 
  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
3302   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3303     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3304   } else {
3305     const IRPosition &IPos = IRPosition::callsite_function(II);
3306     const auto &AANoUnw =
3307         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3308     if (AANoUnw.isAssumedNoUnwind()) {
3309       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3310     } else {
3311       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3312     }
3313   }
3314   return UsedAssumedInformation;
3315 }
3316 
3317 static bool
3318 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3319                         AbstractAttribute &AA,
3320                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3321   bool UsedAssumedInformation = false;
3322   if (BI.getNumSuccessors() == 1) {
3323     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3324   } else {
3325     Optional<ConstantInt *> CI = getAssumedConstantInt(
3326         A, *BI.getCondition(), AA, UsedAssumedInformation);
3327     if (!CI.hasValue()) {
3328       // No value yet, assume both edges are dead.
3329     } else if (CI.getValue()) {
3330       const BasicBlock *SuccBB =
3331           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3332       AliveSuccessors.push_back(&SuccBB->front());
3333     } else {
3334       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3335       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3336       UsedAssumedInformation = false;
3337     }
3338   }
3339   return UsedAssumedInformation;
3340 }
3341 
3342 static bool
3343 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3344                         AbstractAttribute &AA,
3345                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3346   bool UsedAssumedInformation = false;
3347   Optional<ConstantInt *> CI =
3348       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3349   if (!CI.hasValue()) {
3350     // No value yet, assume all edges are dead.
3351   } else if (CI.getValue()) {
3352     for (auto &CaseIt : SI.cases()) {
3353       if (CaseIt.getCaseValue() == CI.getValue()) {
3354         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3355         return UsedAssumedInformation;
3356       }
3357     }
3358     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3359     return UsedAssumedInformation;
3360   } else {
3361     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3362       AliveSuccessors.push_back(&SuccBB->front());
3363   }
3364   return UsedAssumedInformation;
3365 }
3366 
3367 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3368   ChangeStatus Change = ChangeStatus::UNCHANGED;
3369 
3370   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3371                     << getAnchorScope()->size() << "] BBs and "
3372                     << ToBeExploredFrom.size() << " exploration points and "
3373                     << KnownDeadEnds.size() << " known dead ends\n");
3374 
3375   // Copy and clear the list of instructions we need to explore from. It is
3376   // refilled with instructions the next update has to look at.
3377   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3378                                                ToBeExploredFrom.end());
3379   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3380 
3381   SmallVector<const Instruction *, 8> AliveSuccessors;
3382   while (!Worklist.empty()) {
3383     const Instruction *I = Worklist.pop_back_val();
3384     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3385 
3386     // Fast forward for uninteresting instructions. We could look for UB here
3387     // though.
3388     while (!I->isTerminator() && !isa<CallBase>(I)) {
3389       Change = ChangeStatus::CHANGED;
3390       I = I->getNextNode();
3391     }
3392 
3393     AliveSuccessors.clear();
3394 
3395     bool UsedAssumedInformation = false;
3396     switch (I->getOpcode()) {
3397     // TODO: look for (assumed) UB to backwards propagate "deadness".
3398     default:
3399       assert(I->isTerminator() &&
3400              "Expected non-terminators to be handled already!");
3401       for (const BasicBlock *SuccBB : successors(I->getParent()))
3402         AliveSuccessors.push_back(&SuccBB->front());
3403       break;
3404     case Instruction::Call:
3405       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3406                                                        *this, AliveSuccessors);
3407       break;
3408     case Instruction::Invoke:
3409       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3410                                                        *this, AliveSuccessors);
3411       break;
3412     case Instruction::Br:
3413       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3414                                                        *this, AliveSuccessors);
3415       break;
3416     case Instruction::Switch:
3417       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3418                                                        *this, AliveSuccessors);
3419       break;
3420     }
3421 
3422     if (UsedAssumedInformation) {
3423       NewToBeExploredFrom.insert(I);
3424     } else {
3425       Change = ChangeStatus::CHANGED;
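      // If no or not all successors are alive this is a (partial) dead end.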
3426       if (AliveSuccessors.empty() ||
3427           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3428         KnownDeadEnds.insert(I);
3429     }
3430 
3431     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3432                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3433                       << UsedAssumedInformation << "\n");
3434 
3435     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3436       if (!I->isTerminator()) {
3437         assert(AliveSuccessors.size() == 1 &&
3438                "Non-terminator expected to have a single successor!");
3439         Worklist.push_back(AliveSuccessor);
3440       } else {
        // Record the assumed live edge.
3442         AssumedLiveEdges.insert(
3443             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3444         if (assumeLive(A, *AliveSuccessor->getParent()))
3445           Worklist.push_back(AliveSuccessor);
3446       }
3447     }
3448   }
3449 
3450   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3451 
3452   // If we know everything is live there is no need to query for liveness.
3453   // Instead, indicating a pessimistic fixpoint will cause the state to be
3454   // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) finish the exploration, (2) not rule
  // any unreachable code dead, and (3) not discover any non-trivial dead end.
3458   if (ToBeExploredFrom.empty() &&
3459       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3460       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3461         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3462       }))
3463     return indicatePessimisticFixpoint();
3464   return Change;
3465 }
3466 
/// Liveness information for a call site.
3468 struct AAIsDeadCallSite final : AAIsDeadFunction {
3469   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3470       : AAIsDeadFunction(IRP, A) {}
3471 
3472   /// See AbstractAttribute::initialize(...).
3473   void initialize(Attributor &A) override {
3474     // TODO: Once we have call site specific value information we can provide
3475     //       call site specific liveness information and then it makes
3476     //       sense to specialize attributes for call sites instead of
3477     //       redirecting requests to the callee.
3478     llvm_unreachable("Abstract attributes for liveness are not "
3479                      "supported for call sites yet!");
3480   }
3481 
3482   /// See AbstractAttribute::updateImpl(...).
3483   ChangeStatus updateImpl(Attributor &A) override {
3484     return indicatePessimisticFixpoint();
3485   }
3486 
3487   /// See AbstractAttribute::trackStatistics()
3488   void trackStatistics() const override {}
3489 };
3490 
3491 /// -------------------- Dereferenceable Argument Attribute --------------------
3492 
3493 template <>
3494 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3495                                                      const DerefState &R) {
3496   ChangeStatus CS0 =
3497       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3498   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3499   return CS0 | CS1;
3500 }
3501 
3502 struct AADereferenceableImpl : AADereferenceable {
3503   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3504       : AADereferenceable(IRP, A) {}
3505   using StateType = DerefState;
3506 
3507   /// See AbstractAttribute::initialize(...).
3508   void initialize(Attributor &A) override {
3509     SmallVector<Attribute, 4> Attrs;
3510     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3511              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3512     for (const Attribute &Attr : Attrs)
3513       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3514 
3515     const IRPosition &IRP = this->getIRPosition();
3516     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3517 
3518     bool CanBeNull;
3519     takeKnownDerefBytesMaximum(
3520         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3521             A.getDataLayout(), CanBeNull));
3522 
3523     bool IsFnInterface = IRP.isFnInterfaceKind();
3524     Function *FnScope = IRP.getAnchorScope();
3525     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3526       indicatePessimisticFixpoint();
3527       return;
3528     }
3529 
3530     if (Instruction *CtxI = getCtxI())
3531       followUsesInMBEC(*this, A, getState(), *CtxI);
3532   }
3533 
3534   /// See AbstractAttribute::getState()
3535   /// {
3536   StateType &getState() override { return *this; }
3537   const StateType &getState() const override { return *this; }
3538   /// }
3539 
3540   /// Helper function for collecting accessed bytes in must-be-executed-context
3541   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3542                               DerefState &State) {
3543     const Value *UseV = U->get();
3544     if (!UseV->getType()->isPointerTy())
3545       return;
3546 
3547     Type *PtrTy = UseV->getType();
3548     const DataLayout &DL = A.getDataLayout();
3549     int64_t Offset;
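    // If the access goes through the associated value at a constant offset,
    // remember the accessed byte range.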
3550     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3551             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3552       if (Base == &getAssociatedValue() &&
3553           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3554         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3555         State.addAccessedBytes(Offset, Size);
3556       }
3557     }
3558   }
3559 
3560   /// See followUsesInMBEC
3561   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3562                        AADereferenceable::StateType &State) {
3563     bool IsNonNull = false;
3564     bool TrackUse = false;
3565     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3566         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3567     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3568                       << " for instruction " << *I << "\n");
3569 
3570     addAccessedBytesForUse(A, U, I, State);
3571     State.takeKnownDerefBytesMaximum(DerefBytes);
3572     return TrackUse;
3573   }
3574 
3575   /// See AbstractAttribute::manifest(...).
3576   ChangeStatus manifest(Attributor &A) override {
3577     ChangeStatus Change = AADereferenceable::manifest(A);
3578     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3579       removeAttrs({Attribute::DereferenceableOrNull});
3580       return ChangeStatus::CHANGED;
3581     }
3582     return Change;
3583   }
3584 
3585   void getDeducedAttributes(LLVMContext &Ctx,
3586                             SmallVectorImpl<Attribute> &Attrs) const override {
3587     // TODO: Add *_globally support
3588     if (isAssumedNonNull())
3589       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3590           Ctx, getAssumedDereferenceableBytes()));
3591     else
3592       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3593           Ctx, getAssumedDereferenceableBytes()));
3594   }
3595 
3596   /// See AbstractAttribute::getAsStr().
3597   const std::string getAsStr() const override {
3598     if (!getAssumedDereferenceableBytes())
3599       return "unknown-dereferenceable";
3600     return std::string("dereferenceable") +
3601            (isAssumedNonNull() ? "" : "_or_null") +
3602            (isAssumedGlobal() ? "_globally" : "") + "<" +
3603            std::to_string(getKnownDereferenceableBytes()) + "-" +
3604            std::to_string(getAssumedDereferenceableBytes()) + ">";
3605   }
3606 };
3607 
3608 /// Dereferenceable attribute for a floating value.
3609 struct AADereferenceableFloating : AADereferenceableImpl {
3610   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3611       : AADereferenceableImpl(IRP, A) {}
3612 
3613   /// See AbstractAttribute::updateImpl(...).
3614   ChangeStatus updateImpl(Attributor &A) override {
3615     const DataLayout &DL = A.getDataLayout();
3616 
3617     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3618                             bool Stripped) -> bool {
3619       unsigned IdxWidth =
3620           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3621       APInt Offset(IdxWidth, 0);
3622       const Value *Base =
3623           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3624 
3625       const auto &AA = A.getAAFor<AADereferenceable>(
3626           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3627       int64_t DerefBytes = 0;
3628       if (!Stripped && this == &AA) {
3629         // Use IR information if we did not strip anything.
3630         // TODO: track globally.
3631         bool CanBeNull;
3632         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3633         T.GlobalState.indicatePessimisticFixpoint();
3634       } else {
3635         const DerefState &DS = AA.getState();
3636         DerefBytes = DS.DerefBytesState.getAssumed();
3637         T.GlobalState &= DS.GlobalState;
3638       }
3639 
      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // with overflows of the dereferenceable bytes.
3643       int64_t OffsetSExt = Offset.getSExtValue();
3644       if (OffsetSExt < 0)
3645         OffsetSExt = 0;
3646 
3647       T.takeAssumedDerefBytesMinimum(
3648           std::max(int64_t(0), DerefBytes - OffsetSExt));
3649 
3650       if (this == &AA) {
3651         if (!Stripped) {
3652           // If nothing was stripped IR information is all we got.
3653           T.takeKnownDerefBytesMaximum(
3654               std::max(int64_t(0), DerefBytes - OffsetSExt));
3655           T.indicatePessimisticFixpoint();
3656         } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop, which would only drive
          // them down to the known value very slowly; indicating a fixpoint
          // here accelerates that.
3662           T.indicatePessimisticFixpoint();
3663         }
3664       }
3665 
3666       return T.isValidState();
3667     };
3668 
3669     DerefState T;
3670     if (!genericValueTraversal<AADereferenceable, DerefState>(
3671             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3672       return indicatePessimisticFixpoint();
3673 
3674     return clampStateAndIndicateChange(getState(), T);
3675   }
3676 
3677   /// See AbstractAttribute::trackStatistics()
3678   void trackStatistics() const override {
3679     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3680   }
3681 };
3682 
3683 /// Dereferenceable attribute for a return value.
3684 struct AADereferenceableReturned final
3685     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3686   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3687       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3688             IRP, A) {}
3689 
3690   /// See AbstractAttribute::trackStatistics()
3691   void trackStatistics() const override {
3692     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3693   }
3694 };
3695 
3696 /// Dereferenceable attribute for an argument
3697 struct AADereferenceableArgument final
3698     : AAArgumentFromCallSiteArguments<AADereferenceable,
3699                                       AADereferenceableImpl> {
3700   using Base =
3701       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3702   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3703       : Base(IRP, A) {}
3704 
3705   /// See AbstractAttribute::trackStatistics()
3706   void trackStatistics() const override {
3707     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3708   }
3709 };
3710 
3711 /// Dereferenceable attribute for a call site argument.
3712 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3713   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3714       : AADereferenceableFloating(IRP, A) {}
3715 
3716   /// See AbstractAttribute::trackStatistics()
3717   void trackStatistics() const override {
3718     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3719   }
3720 };
3721 
3722 /// Dereferenceable attribute deduction for a call site return value.
3723 struct AADereferenceableCallSiteReturned final
3724     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3725   using Base =
3726       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3727   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3728       : Base(IRP, A) {}
3729 
3730   /// See AbstractAttribute::trackStatistics()
3731   void trackStatistics() const override {
3732     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3733   }
3734 };
3735 
3736 // ------------------------ Align Argument Attribute ------------------------
3737 
3738 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3739                                     Value &AssociatedValue, const Use *U,
3740                                     const Instruction *I, bool &TrackUse) {
3741   // We need to follow common pointer manipulation uses to the accesses they
3742   // feed into.
3743   if (isa<CastInst>(I)) {
3744     // Follow all but ptr2int casts.
3745     TrackUse = !isa<PtrToIntInst>(I);
3746     return 0;
3747   }
3748   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3749     if (GEP->hasAllConstantIndices())
3750       TrackUse = true;
3751     return 0;
3752   }
3753 
3754   MaybeAlign MA;
3755   if (const auto *CB = dyn_cast<CallBase>(I)) {
3756     if (CB->isBundleOperand(U) || CB->isCallee(U))
3757       return 0;
3758 
3759     unsigned ArgNo = CB->getArgOperandNo(U);
3760     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3761     // As long as we only use known information there is no need to track
3762     // dependences here.
3763     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3764     MA = MaybeAlign(AlignAA.getKnownAlign());
3765   }
3766 
3767   const DataLayout &DL = A.getDataLayout();
3768   const Value *UseV = U->get();
3769   if (auto *SI = dyn_cast<StoreInst>(I)) {
3770     if (SI->getPointerOperand() == UseV)
3771       MA = SI->getAlign();
3772   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3773     if (LI->getPointerOperand() == UseV)
3774       MA = LI->getAlign();
3775   }
3776 
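  // Bail if this use does not imply more alignment than is already known.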
3777   if (!MA || *MA <= QueryingAA.getKnownAlign())
3778     return 0;
3779 
3780   unsigned Alignment = MA->value();
3781   int64_t Offset;
3782 
3783   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3784     if (Base == &AssociatedValue) {
3785       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3786       // So we can say that the maximum power of two which is a divisor of
3787       // gcd(Offset, Alignment) is an alignment.
3788 
3789       uint32_t gcd =
3790           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3791       Alignment = llvm::PowerOf2Floor(gcd);
3792     }
3793   }
3794 
3795   return Alignment;
3796 }
3797 
3798 struct AAAlignImpl : AAAlign {
3799   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3800 
3801   /// See AbstractAttribute::initialize(...).
3802   void initialize(Attributor &A) override {
3803     SmallVector<Attribute, 4> Attrs;
3804     getAttrs({Attribute::Alignment}, Attrs);
3805     for (const Attribute &Attr : Attrs)
3806       takeKnownMaximum(Attr.getValueAsInt());
3807 
3808     Value &V = getAssociatedValue();
    // TODO: This is a HACK to avoid getPointerAlignment introducing a ptr2int
    //       use of the function pointer. This was caused by D73131. We want to
    //       avoid this for function pointers especially because we iterate
    //       their uses and int2ptr is not handled. It is not a correctness
    //       problem though!
3814     if (!V.getType()->getPointerElementType()->isFunctionTy())
3815       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3816 
3817     if (getIRPosition().isFnInterfaceKind() &&
3818         (!getAnchorScope() ||
3819          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3820       indicatePessimisticFixpoint();
3821       return;
3822     }
3823 
3824     if (Instruction *CtxI = getCtxI())
3825       followUsesInMBEC(*this, A, getState(), *CtxI);
3826   }
3827 
3828   /// See AbstractAttribute::manifest(...).
3829   ChangeStatus manifest(Attributor &A) override {
3830     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3831 
3832     // Check for users that allow alignment annotations.
3833     Value &AssociatedValue = getAssociatedValue();
3834     for (const Use &U : AssociatedValue.uses()) {
3835       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3836         if (SI->getPointerOperand() == &AssociatedValue)
3837           if (SI->getAlignment() < getAssumedAlign()) {
3838             STATS_DECLTRACK(AAAlign, Store,
3839                             "Number of times alignment added to a store");
3840             SI->setAlignment(Align(getAssumedAlign()));
3841             LoadStoreChanged = ChangeStatus::CHANGED;
3842           }
3843       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3844         if (LI->getPointerOperand() == &AssociatedValue)
3845           if (LI->getAlignment() < getAssumedAlign()) {
3846             LI->setAlignment(Align(getAssumedAlign()));
3847             STATS_DECLTRACK(AAAlign, Load,
3848                             "Number of times alignment added to a load");
3849             LoadStoreChanged = ChangeStatus::CHANGED;
3850           }
3851       }
3852     }
3853 
3854     ChangeStatus Changed = AAAlign::manifest(A);
3855 
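    // If the IR already implies at least the assumed alignment, the manifested
    // attribute is not an improvement; only report the load/store changes.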
3856     Align InheritAlign =
3857         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3858     if (InheritAlign >= getAssumedAlign())
3859       return LoadStoreChanged;
3860     return Changed | LoadStoreChanged;
3861   }
3862 
3863   // TODO: Provide a helper to determine the implied ABI alignment and check in
3864   //       the existing manifest method and a new one for AAAlignImpl that value
3865   //       to avoid making the alignment explicit if it did not improve.
3866 
3867   /// See AbstractAttribute::getDeducedAttributes
3868   virtual void
3869   getDeducedAttributes(LLVMContext &Ctx,
3870                        SmallVectorImpl<Attribute> &Attrs) const override {
3871     if (getAssumedAlign() > 1)
3872       Attrs.emplace_back(
3873           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3874   }
3875 
3876   /// See followUsesInMBEC
3877   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3878                        AAAlign::StateType &State) {
3879     bool TrackUse = false;
3880 
3881     unsigned int KnownAlign =
3882         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3883     State.takeKnownMaximum(KnownAlign);
3884 
3885     return TrackUse;
3886   }
3887 
3888   /// See AbstractAttribute::getAsStr().
3889   const std::string getAsStr() const override {
3890     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3891                                 "-" + std::to_string(getAssumedAlign()) + ">")
3892                              : "unknown-align";
3893   }
3894 };
3895 
3896 /// Align attribute for a floating value.
3897 struct AAAlignFloating : AAAlignImpl {
3898   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3899 
3900   /// See AbstractAttribute::updateImpl(...).
3901   ChangeStatus updateImpl(Attributor &A) override {
3902     const DataLayout &DL = A.getDataLayout();
3903 
3904     auto VisitValueCB = [&](Value &V, const Instruction *,
3905                             AAAlign::StateType &T, bool Stripped) -> bool {
3906       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3907                                            DepClassTy::REQUIRED);
3908       if (!Stripped && this == &AA) {
3909         int64_t Offset;
3910         unsigned Alignment = 1;
3911         if (const Value *Base =
3912                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3913           Align PA = Base->getPointerAlignment(DL);
3914           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3915           // So we can say that the maximum power of two which is a divisor of
3916           // gcd(Offset, Alignment) is an alignment.
3917 
3918           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3919                                                uint32_t(PA.value()));
3920           Alignment = llvm::PowerOf2Floor(gcd);
3921         } else {
3922           Alignment = V.getPointerAlignment(DL).value();
3923         }
3924         // Use only IR information if we did not strip anything.
3925         T.takeKnownMaximum(Alignment);
3926         T.indicatePessimisticFixpoint();
3927       } else {
3928         // Use abstract attribute information.
3929         const AAAlign::StateType &DS = AA.getState();
3930         T ^= DS;
3931       }
3932       return T.isValidState();
3933     };
3934 
3935     StateType T;
3936     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3937                                                    VisitValueCB, getCtxI()))
3938       return indicatePessimisticFixpoint();
3939 
    // TODO: If we know we visited all incoming values, and thus none are
    // assumed dead, we can take the known information from the state T.
3942     return clampStateAndIndicateChange(getState(), T);
3943   }
3944 
3945   /// See AbstractAttribute::trackStatistics()
3946   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3947 };
3948 
3949 /// Align attribute for function return value.
3950 struct AAAlignReturned final
3951     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3952   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3953   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3954 
3955   /// See AbstractAttribute::initialize(...).
3956   void initialize(Attributor &A) override {
3957     Base::initialize(A);
3958     Function *F = getAssociatedFunction();
3959     if (!F || F->isDeclaration())
3960       indicatePessimisticFixpoint();
3961   }
3962 
3963   /// See AbstractAttribute::trackStatistics()
3964   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3965 };
3966 
3967 /// Align attribute for function argument.
3968 struct AAAlignArgument final
3969     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3970   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3971   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3972 
3973   /// See AbstractAttribute::manifest(...).
3974   ChangeStatus manifest(Attributor &A) override {
3975     // If the associated argument is involved in a must-tail call we give up
3976     // because we would need to keep the argument alignments of caller and
3977     // callee in-sync. Just does not seem worth the trouble right now.
3978     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3979       return ChangeStatus::UNCHANGED;
3980     return Base::manifest(A);
3981   }
3982 
3983   /// See AbstractAttribute::trackStatistics()
3984   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3985 };
3986 
3987 struct AAAlignCallSiteArgument final : AAAlignFloating {
3988   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3989       : AAAlignFloating(IRP, A) {}
3990 
3991   /// See AbstractAttribute::manifest(...).
3992   ChangeStatus manifest(Attributor &A) override {
3993     // If the associated argument is involved in a must-tail call we give up
3994     // because we would need to keep the argument alignments of caller and
3995     // callee in-sync. Just does not seem worth the trouble right now.
3996     if (Argument *Arg = getAssociatedArgument())
3997       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3998         return ChangeStatus::UNCHANGED;
3999     ChangeStatus Changed = AAAlignImpl::manifest(A);
4000     Align InheritAlign =
4001         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4002     if (InheritAlign >= getAssumedAlign())
4003       Changed = ChangeStatus::UNCHANGED;
4004     return Changed;
4005   }
4006 
4007   /// See AbstractAttribute::updateImpl(Attributor &A).
4008   ChangeStatus updateImpl(Attributor &A) override {
4009     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4010     if (Argument *Arg = getAssociatedArgument()) {
4011       // We only take known information from the argument
4012       // so we do not need to track a dependence.
4013       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4014           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4015       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4016     }
4017     return Changed;
4018   }
4019 
4020   /// See AbstractAttribute::trackStatistics()
4021   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4022 };
4023 
4024 /// Align attribute deduction for a call site return value.
4025 struct AAAlignCallSiteReturned final
4026     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4027   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4028   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4029       : Base(IRP, A) {}
4030 
4031   /// See AbstractAttribute::initialize(...).
4032   void initialize(Attributor &A) override {
4033     Base::initialize(A);
4034     Function *F = getAssociatedFunction();
4035     if (!F || F->isDeclaration())
4036       indicatePessimisticFixpoint();
4037   }
4038 
4039   /// See AbstractAttribute::trackStatistics()
4040   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4041 };
4042 
4043 /// ------------------ Function No-Return Attribute ----------------------------
4044 struct AANoReturnImpl : public AANoReturn {
4045   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4046 
4047   /// See AbstractAttribute::initialize(...).
4048   void initialize(Attributor &A) override {
4049     AANoReturn::initialize(A);
4050     Function *F = getAssociatedFunction();
4051     if (!F || F->isDeclaration())
4052       indicatePessimisticFixpoint();
4053   }
4054 
4055   /// See AbstractAttribute::getAsStr().
4056   const std::string getAsStr() const override {
4057     return getAssumed() ? "noreturn" : "may-return";
4058   }
4059 
4060   /// See AbstractAttribute::updateImpl(Attributor &A).
4061   virtual ChangeStatus updateImpl(Attributor &A) override {
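    // The callback rejects every return instruction it is handed, so the call
    // to checkForAllInstructions below only succeeds if there is no (live)
    // return instruction, i.e., the function does not return.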
4062     auto CheckForNoReturn = [](Instruction &) { return false; };
4063     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4064                                    {(unsigned)Instruction::Ret}))
4065       return indicatePessimisticFixpoint();
4066     return ChangeStatus::UNCHANGED;
4067   }
4068 };
4069 
4070 struct AANoReturnFunction final : AANoReturnImpl {
4071   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4072       : AANoReturnImpl(IRP, A) {}
4073 
4074   /// See AbstractAttribute::trackStatistics()
4075   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4076 };
4077 
4078 /// NoReturn attribute deduction for a call site.
4079 struct AANoReturnCallSite final : AANoReturnImpl {
4080   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4081       : AANoReturnImpl(IRP, A) {}
4082 
4083   /// See AbstractAttribute::initialize(...).
4084   void initialize(Attributor &A) override {
4085     AANoReturnImpl::initialize(A);
4086     if (Function *F = getAssociatedFunction()) {
4087       const IRPosition &FnPos = IRPosition::function(*F);
4088       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4089       if (!FnAA.isAssumedNoReturn())
4090         indicatePessimisticFixpoint();
4091     }
4092   }
4093 
4094   /// See AbstractAttribute::updateImpl(...).
4095   ChangeStatus updateImpl(Attributor &A) override {
4096     // TODO: Once we have call site specific value information we can provide
4097     //       call site specific liveness information and then it makes
4098     //       sense to specialize attributes for call site arguments instead of
4099     //       redirecting requests to the callee argument.
4100     Function *F = getAssociatedFunction();
4101     const IRPosition &FnPos = IRPosition::function(*F);
4102     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4103     return clampStateAndIndicateChange(getState(), FnAA.getState());
4104   }
4105 
4106   /// See AbstractAttribute::trackStatistics()
4107   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4108 };
4109 
4110 /// ----------------------- Variable Capturing ---------------------------------
4111 
4112 /// A class to hold the state for no-capture attributes.
4113 struct AANoCaptureImpl : public AANoCapture {
4114   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4115 
4116   /// See AbstractAttribute::initialize(...).
4117   void initialize(Attributor &A) override {
4118     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4119       indicateOptimisticFixpoint();
4120       return;
4121     }
4122     Function *AnchorScope = getAnchorScope();
4123     if (isFnInterfaceKind() &&
4124         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4125       indicatePessimisticFixpoint();
4126       return;
4127     }
4128 
4129     // You cannot "capture" null in the default address space.
4130     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4131         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4132       indicateOptimisticFixpoint();
4133       return;
4134     }
4135 
4136     const Function *F =
4137         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4138 
4139     // Check what state the associated function can actually capture.
4140     if (F)
4141       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4142     else
4143       indicatePessimisticFixpoint();
4144   }
4145 
4146   /// See AbstractAttribute::updateImpl(...).
4147   ChangeStatus updateImpl(Attributor &A) override;
4148 
4149   /// See AbstractAttribute::getDeducedAttributes(...).
4150   virtual void
4151   getDeducedAttributes(LLVMContext &Ctx,
4152                        SmallVectorImpl<Attribute> &Attrs) const override {
4153     if (!isAssumedNoCaptureMaybeReturned())
4154       return;
4155 
4156     if (isArgumentPosition()) {
4157       if (isAssumedNoCapture())
4158         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4159       else if (ManifestInternal)
4160         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4161     }
4162   }
4163 
4164   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4165   /// depending on the ability of the function associated with \p IRP to capture
4166   /// state in memory and through "returning/throwing", respectively.
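  ///
  /// As an illustrative (assumed) example, a callee such as
  ///   declare void @sink(i8* %p) readonly nounwind
  /// can neither write the pointer to memory nor communicate it back through
  /// a return value or an exception, so NO_CAPTURE becomes a known bit.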
4167   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4168                                                    const Function &F,
4169                                                    BitIntegerState &State) {
4170     // TODO: Once we have memory behavior attributes we should use them here.
4171 
4172     // If we know we cannot communicate or write to memory, we do not care about
4173     // ptr2int anymore.
4174     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4175         F.getReturnType()->isVoidTy()) {
4176       State.addKnownBits(NO_CAPTURE);
4177       return;
4178     }
4179 
4180     // A function cannot capture state in memory if it only reads memory; it can,
4181     // however, return/throw state and the state might be influenced by the
4182     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4183     if (F.onlyReadsMemory())
4184       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4185 
4186     // A function cannot communicate state back if it does not throw
4187     // exceptions and does not return values.
4188     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4189       State.addKnownBits(NOT_CAPTURED_IN_RET);
4190 
4191     // Check existing "returned" attributes.
4192     int ArgNo = IRP.getCalleeArgNo();
4193     if (F.doesNotThrow() && ArgNo >= 0) {
4194       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4195         if (F.hasParamAttribute(u, Attribute::Returned)) {
4196           if (u == unsigned(ArgNo))
4197             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4198           else if (F.onlyReadsMemory())
4199             State.addKnownBits(NO_CAPTURE);
4200           else
4201             State.addKnownBits(NOT_CAPTURED_IN_RET);
4202           break;
4203         }
4204     }
4205   }
4206 
4207   /// See AbstractState::getAsStr().
4208   const std::string getAsStr() const override {
4209     if (isKnownNoCapture())
4210       return "known not-captured";
4211     if (isAssumedNoCapture())
4212       return "assumed not-captured";
4213     if (isKnownNoCaptureMaybeReturned())
4214       return "known not-captured-maybe-returned";
4215     if (isAssumedNoCaptureMaybeReturned())
4216       return "assumed not-captured-maybe-returned";
4217     return "assumed-captured";
4218   }
4219 };
4220 
4221 /// Attributor-aware capture tracker.
4222 struct AACaptureUseTracker final : public CaptureTracker {
4223 
4224   /// Create a capture tracker that can lookup in-flight abstract attributes
4225   /// through the Attributor \p A.
4226   ///
4227   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4228   /// search is stopped. If a use leads to a return instruction,
4229   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4230   /// If a use leads to a ptr2int which may capture the value,
4231   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4232   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4233   /// set. All values in \p PotentialCopies are later tracked as well. For every
4234   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4235   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4236   /// conservatively set to true.
4237   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4238                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4239                       SmallVectorImpl<const Value *> &PotentialCopies,
4240                       unsigned &RemainingUsesToExplore)
4241       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4242         PotentialCopies(PotentialCopies),
4243         RemainingUsesToExplore(RemainingUsesToExplore) {}
4244 
4245   /// Determine if \p V may be captured. *Also updates the state!*
4246   bool valueMayBeCaptured(const Value *V) {
4247     if (V->getType()->isPointerTy()) {
4248       PointerMayBeCaptured(V, this);
4249     } else {
4250       State.indicatePessimisticFixpoint();
4251     }
4252     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4253   }
4254 
4255   /// See CaptureTracker::tooManyUses().
4256   void tooManyUses() override {
4257     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4258   }
4259 
4260   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4261     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4262       return true;
4263     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4264         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4265     return DerefAA.getAssumedDereferenceableBytes();
4266   }
4267 
4268   /// See CaptureTracker::captured(...).
4269   bool captured(const Use *U) override {
4270     Instruction *UInst = cast<Instruction>(U->getUser());
4271     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4272                       << "\n");
4273 
4274     // Because we may reuse the tracker multiple times we keep track of the
4275     // number of explored uses ourselves as well.
4276     if (RemainingUsesToExplore-- == 0) {
4277       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4278       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4279                           /* Return */ true);
4280     }
4281 
4282     // Deal with ptr2int by following uses.
4283     if (isa<PtrToIntInst>(UInst)) {
4284       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4285       return valueMayBeCaptured(UInst);
4286     }
4287 
4288     // Explicitly catch return instructions.
4289     if (isa<ReturnInst>(UInst))
4290       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4291                           /* Return */ true);
4292 
4293     // For now we only use special logic for call sites. However, the tracker
4294     // itself knows about a lot of other non-capturing cases already.
4295     auto *CB = dyn_cast<CallBase>(UInst);
4296     if (!CB || !CB->isArgOperand(U))
4297       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4298                           /* Return */ true);
4299 
4300     unsigned ArgNo = CB->getArgOperandNo(U);
4301     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4302     // If we have an abstract no-capture attribute for the argument we can use
4303     // it to justify a non-capture attribute here. This allows recursion!
4304     auto &ArgNoCaptureAA =
4305         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4306     if (ArgNoCaptureAA.isAssumedNoCapture())
4307       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4308                           /* Return */ false);
4309     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4310       addPotentialCopy(*CB);
4311       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4312                           /* Return */ false);
4313     }
4314 
4315     // Lastly, we could not find a reason to assume no-capture, so we do not.
4316     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4317                         /* Return */ true);
4318   }
4319 
4320   /// Register \p CB as a potential copy of the value we are checking.
4321   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4322 
4323   /// See CaptureTracker::shouldExplore(...).
4324   bool shouldExplore(const Use *U) override {
4325     // Check liveness and ignore droppable users.
4326     return !U->getUser()->isDroppable() &&
4327            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4328   }
4329 
4330   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4331   /// \p CapturedInRet, then return the appropriate value for use in the
4332   /// CaptureTracker::captured() interface.
4333   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4334                     bool CapturedInRet) {
4335     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4336                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4337     if (CapturedInMem)
4338       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4339     if (CapturedInInt)
4340       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4341     if (CapturedInRet)
4342       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4343     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4344   }
4345 
4346 private:
4347   /// The attributor providing in-flight abstract attributes.
4348   Attributor &A;
4349 
4350   /// The abstract attribute currently updated.
4351   AANoCapture &NoCaptureAA;
4352 
4353   /// The abstract liveness state.
4354   const AAIsDead &IsDeadAA;
4355 
4356   /// The state currently updated.
4357   AANoCapture::StateType &State;
4358 
4359   /// Set of potential copies of the tracked value.
4360   SmallVectorImpl<const Value *> &PotentialCopies;
4361 
4362   /// Global counter to limit the number of explored uses.
4363   unsigned &RemainingUsesToExplore;
4364 };
4365 
4366 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4367   const IRPosition &IRP = getIRPosition();
4368   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4369                                         : &IRP.getAssociatedValue();
4370   if (!V)
4371     return indicatePessimisticFixpoint();
4372 
4373   const Function *F =
4374       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4375   assert(F && "Expected a function!");
4376   const IRPosition &FnPos = IRPosition::function(*F);
4377   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4378 
4379   AANoCapture::StateType T;
4380 
4381   // Readonly means we cannot capture through memory.
4382   const auto &FnMemAA =
4383       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4384   if (FnMemAA.isAssumedReadOnly()) {
4385     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4386     if (FnMemAA.isKnownReadOnly())
4387       addKnownBits(NOT_CAPTURED_IN_MEM);
4388     else
4389       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4390   }
4391 
4392   // Make sure all returned values are different from the underlying value.
4393   // TODO: we could do this in a more sophisticated way inside
4394   //       AAReturnedValues, e.g., track all values that escape through returns
4395   //       directly somehow.
4396   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4397     bool SeenConstant = false;
4398     for (auto &It : RVAA.returned_values()) {
4399       if (isa<Constant>(It.first)) {
4400         if (SeenConstant)
4401           return false;
4402         SeenConstant = true;
4403       } else if (!isa<Argument>(It.first) ||
4404                  It.first == getAssociatedArgument())
4405         return false;
4406     }
4407     return true;
4408   };
4409 
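  // If the function is assumed nounwind and, in case it returns a value, none
  // of the returned values can be the tracked value (see CheckReturnedArgs),
  // the value cannot escape via returning or throwing.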
4410   const auto &NoUnwindAA =
4411       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4412   if (NoUnwindAA.isAssumedNoUnwind()) {
4413     bool IsVoidTy = F->getReturnType()->isVoidTy();
4414     const AAReturnedValues *RVAA =
4415         IsVoidTy ? nullptr
4416                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4418                                                  DepClassTy::OPTIONAL);
4419     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4420       T.addKnownBits(NOT_CAPTURED_IN_RET);
4421       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4422         return ChangeStatus::UNCHANGED;
4423       if (NoUnwindAA.isKnownNoUnwind() &&
4424           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4425         addKnownBits(NOT_CAPTURED_IN_RET);
4426         if (isKnown(NOT_CAPTURED_IN_MEM))
4427           return indicateOptimisticFixpoint();
4428       }
4429     }
4430   }
4431 
4432   // Use the CaptureTracker interface and logic with the specialized tracker,
4433   // defined in AACaptureUseTracker, that can look at in-flight abstract
4434   // attributes and directly update the assumed state.
4435   SmallVector<const Value *, 4> PotentialCopies;
4436   unsigned RemainingUsesToExplore =
4437       getDefaultMaxUsesToExploreForCaptureTracking();
4438   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4439                               RemainingUsesToExplore);
4440 
4441   // Check all potential copies of the associated value until we can assume
4442   // none will be captured or we have to assume at least one might be.
4443   unsigned Idx = 0;
4444   PotentialCopies.push_back(V);
4445   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4446     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4447 
4448   AANoCapture::StateType &S = getState();
4449   auto Assumed = S.getAssumed();
4450   S.intersectAssumedBits(T.getAssumed());
4451   if (!isAssumedNoCaptureMaybeReturned())
4452     return indicatePessimisticFixpoint();
4453   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4454                                    : ChangeStatus::CHANGED;
4455 }
4456 
4457 /// NoCapture attribute for function arguments.
4458 struct AANoCaptureArgument final : AANoCaptureImpl {
4459   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4460       : AANoCaptureImpl(IRP, A) {}
4461 
4462   /// See AbstractAttribute::trackStatistics()
4463   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4464 };
4465 
4466 /// NoCapture attribute for call site arguments.
4467 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4468   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4469       : AANoCaptureImpl(IRP, A) {}
4470 
4471   /// See AbstractAttribute::initialize(...).
4472   void initialize(Attributor &A) override {
4473     if (Argument *Arg = getAssociatedArgument())
4474       if (Arg->hasByValAttr())
4475         indicateOptimisticFixpoint();
4476     AANoCaptureImpl::initialize(A);
4477   }
4478 
4479   /// See AbstractAttribute::updateImpl(...).
4480   ChangeStatus updateImpl(Attributor &A) override {
4481     // TODO: Once we have call site specific value information we can provide
4482     //       call site specific liveness information and then it makes
4483     //       sense to specialize attributes for call site arguments instead of
4484     //       redirecting requests to the callee argument.
4485     Argument *Arg = getAssociatedArgument();
4486     if (!Arg)
4487       return indicatePessimisticFixpoint();
4488     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4489     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4490     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4491   }
4492 
4493   /// See AbstractAttribute::trackStatistics()
4494   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4495 };
4496 
4497 /// NoCapture attribute for floating values.
4498 struct AANoCaptureFloating final : AANoCaptureImpl {
4499   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4500       : AANoCaptureImpl(IRP, A) {}
4501 
4502   /// See AbstractAttribute::trackStatistics()
4503   void trackStatistics() const override {
4504     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4505   }
4506 };
4507 
4508 /// NoCapture attribute for function return value.
4509 struct AANoCaptureReturned final : AANoCaptureImpl {
4510   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4511       : AANoCaptureImpl(IRP, A) {
4512     llvm_unreachable("NoCapture is not applicable to function returns!");
4513   }
4514 
4515   /// See AbstractAttribute::initialize(...).
4516   void initialize(Attributor &A) override {
4517     llvm_unreachable("NoCapture is not applicable to function returns!");
4518   }
4519 
4520   /// See AbstractAttribute::updateImpl(...).
4521   ChangeStatus updateImpl(Attributor &A) override {
4522     llvm_unreachable("NoCapture is not applicable to function returns!");
4523   }
4524 
4525   /// See AbstractAttribute::trackStatistics()
4526   void trackStatistics() const override {}
4527 };
4528 
4529 /// NoCapture attribute deduction for a call site return value.
4530 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4531   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4532       : AANoCaptureImpl(IRP, A) {}
4533 
4534   /// See AbstractAttribute::initialize(...).
4535   void initialize(Attributor &A) override {
4536     const Function *F = getAnchorScope();
4537     // Check what state the associated function can actually capture.
4538     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4539   }
4540 
4541   /// See AbstractAttribute::trackStatistics()
4542   void trackStatistics() const override {
4543     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4544   }
4545 };
4546 
4547 /// ------------------ Value Simplify Attribute ----------------------------
4548 struct AAValueSimplifyImpl : AAValueSimplify {
4549   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4550       : AAValueSimplify(IRP, A) {}
4551 
4552   /// See AbstractAttribute::initialize(...).
4553   void initialize(Attributor &A) override {
4554     if (getAssociatedValue().getType()->isVoidTy())
4555       indicatePessimisticFixpoint();
4556   }
4557 
4558   /// See AbstractAttribute::getAsStr().
4559   const std::string getAsStr() const override {
4560     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4561                         : "not-simple";
4562   }
4563 
4564   /// See AbstractAttribute::trackStatistics()
4565   void trackStatistics() const override {}
4566 
4567   /// See AAValueSimplify::getAssumedSimplifiedValue()
4568   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4569     if (!getAssumed())
4570       return const_cast<Value *>(&getAssociatedValue());
4571     return SimplifiedAssociatedValue;
4572   }
4573 
4574   /// Helper function for querying AAValueSimplify and updating the candidate.
4575   /// \param QueryingValue Value trying to unify with SimplifiedValue
4576   /// \param AccumulatedSimplifiedValue Current simplification result.
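  /// \returns True if \p AccumulatedSimplifiedValue remains a consistent
  ///          candidate (possibly updated), false if unification failed.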
4577   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4578                              Value &QueryingValue,
4579                              Optional<Value *> &AccumulatedSimplifiedValue) {
4580     // FIXME: Add typecast support.
4581 
4582     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4583         QueryingAA, IRPosition::value(QueryingValue), DepClassTy::REQUIRED);
4584 
4585     Optional<Value *> QueryingValueSimplified =
4586         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4587 
4588     if (!QueryingValueSimplified.hasValue())
4589       return true;
4590 
4591     if (!QueryingValueSimplified.getValue())
4592       return false;
4593 
4594     Value &QueryingValueSimplifiedUnwrapped =
4595         *QueryingValueSimplified.getValue();
4596 
4597     if (AccumulatedSimplifiedValue.hasValue() &&
4598         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4599         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4600       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4601     if (AccumulatedSimplifiedValue.hasValue() &&
4602         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4603       return true;
4604 
4605     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4606                       << " is assumed to be "
4607                       << QueryingValueSimplifiedUnwrapped << "\n");
4608 
4609     AccumulatedSimplifiedValue = QueryingValueSimplified;
4610     return true;
4611   }
4612 
4613   /// Return true if a simplification candidate was found, false otherwise.
4614   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4615     if (!getAssociatedValue().getType()->isIntegerTy())
4616       return false;
4617 
4618     const auto &AA =
4619         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4620 
4621     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4622 
4623     if (!COpt.hasValue()) {
4624       SimplifiedAssociatedValue = llvm::None;
4625       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4626       return true;
4627     }
4628     if (auto *C = COpt.getValue()) {
4629       SimplifiedAssociatedValue = C;
4630       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4631       return true;
4632     }
4633     return false;
4634   }
4635 
4636   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4637     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4638       return true;
4639     if (askSimplifiedValueFor<AAPotentialValues>(A))
4640       return true;
4641     return false;
4642   }
4643 
4644   /// See AbstractAttribute::manifest(...).
4645   ChangeStatus manifest(Attributor &A) override {
4646     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4647 
4648     if (SimplifiedAssociatedValue.hasValue() &&
4649         !SimplifiedAssociatedValue.getValue())
4650       return Changed;
4651 
4652     Value &V = getAssociatedValue();
4653     auto *C = SimplifiedAssociatedValue.hasValue()
4654                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4655                   : UndefValue::get(V.getType());
4656     if (C) {
4657       // We can replace the AssociatedValue with the constant.
4658       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4659         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4660                           << " :: " << *this << "\n");
4661         if (A.changeValueAfterManifest(V, *C))
4662           Changed = ChangeStatus::CHANGED;
4663       }
4664     }
4665 
4666     return Changed | AAValueSimplify::manifest(A);
4667   }
4668 
4669   /// See AbstractState::indicatePessimisticFixpoint(...).
4670   ChangeStatus indicatePessimisticFixpoint() override {
4671     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4672     // regarded as known. That's why `indicateOptimisticFixpoint` is called.
4673     SimplifiedAssociatedValue = &getAssociatedValue();
4674     indicateOptimisticFixpoint();
4675     return ChangeStatus::CHANGED;
4676   }
4677 
4678 protected:
4679   // An assumed simplified value. Initially, it is set to Optional::None, which
4680   // means that the value is not clear under the current assumption. If in the
4681   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4682   // the original associated value.
4683   Optional<Value *> SimplifiedAssociatedValue;
4684 };
4685 
4686 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4687   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4688       : AAValueSimplifyImpl(IRP, A) {}
4689 
4690   void initialize(Attributor &A) override {
4691     AAValueSimplifyImpl::initialize(A);
4692     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4693       indicatePessimisticFixpoint();
4694     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4695                  Attribute::StructRet, Attribute::Nest},
4696                 /* IgnoreSubsumingPositions */ true))
4697       indicatePessimisticFixpoint();
4698 
4699     // FIXME: This is a hack to prevent us from propagating function pointers
4700     // in the new pass manager CGSCC pass as it creates call edges the
4701     // CallGraphUpdater cannot handle yet.
4702     Value &V = getAssociatedValue();
4703     if (V.getType()->isPointerTy() &&
4704         V.getType()->getPointerElementType()->isFunctionTy() &&
4705         !A.isModulePass())
4706       indicatePessimisticFixpoint();
4707   }
4708 
4709   /// See AbstractAttribute::updateImpl(...).
4710   ChangeStatus updateImpl(Attributor &A) override {
4711     // Byval is only replaceable if it is readonly; otherwise we would write
4712     // into the replaced value and not the copy that byval creates implicitly.
4713     Argument *Arg = getAssociatedArgument();
4714     if (Arg->hasByValAttr()) {
4715       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4716       //       there is no race by not copying a constant byval.
4717       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4718                                                        DepClassTy::REQUIRED);
4719       if (!MemAA.isAssumedReadOnly())
4720         return indicatePessimisticFixpoint();
4721     }
4722 
4723     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4724 
4725     auto PredForCallSite = [&](AbstractCallSite ACS) {
4726       const IRPosition &ACSArgPos =
4727           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4728       // Check if a corresponding argument was found or if it is one not
4729       // associated (which can happen for callback calls).
4730       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4731         return false;
4732 
4733       // We can only propagate thread independent values through callbacks.
4734       // This is different to direct/indirect call sites because for them we
4735       // know the thread executing the caller and callee is the same. For
4736       // callbacks this is not guaranteed, thus a thread dependent value could
4737       // be different for the caller and callee, making it invalid to propagate.
4738       Value &ArgOp = ACSArgPos.getAssociatedValue();
4739       if (ACS.isCallbackCall())
4740         if (auto *C = dyn_cast<Constant>(&ArgOp))
4741           if (C->isThreadDependent())
4742             return false;
4743       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4744     };
4745 
4746     bool AllCallSitesKnown;
4747     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4748                                 AllCallSitesKnown))
4749       if (!askSimplifiedValueForOtherAAs(A))
4750         return indicatePessimisticFixpoint();
4751 
4752     // If a candidate was found in this update, return CHANGED.
4753     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4754                ? ChangeStatus::UNCHANGED
4755                : ChangeStatus ::CHANGED;
4756   }
4757 
4758   /// See AbstractAttribute::trackStatistics()
4759   void trackStatistics() const override {
4760     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4761   }
4762 };
4763 
4764 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4765   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4766       : AAValueSimplifyImpl(IRP, A) {}
4767 
4768   /// See AbstractAttribute::updateImpl(...).
4769   ChangeStatus updateImpl(Attributor &A) override {
4770     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4771 
4772     auto PredForReturned = [&](Value &V) {
4773       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4774     };
4775 
4776     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4777       if (!askSimplifiedValueForOtherAAs(A))
4778         return indicatePessimisticFixpoint();
4779 
4780     // If a candidate was found in this update, return CHANGED.
4781     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4782                ? ChangeStatus::UNCHANGED
4783                : ChangeStatus ::CHANGED;
4784   }
4785 
4786   ChangeStatus manifest(Attributor &A) override {
4787     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4788 
4789     if (SimplifiedAssociatedValue.hasValue() &&
4790         !SimplifiedAssociatedValue.getValue())
4791       return Changed;
4792 
4793     Value &V = getAssociatedValue();
4794     auto *C = SimplifiedAssociatedValue.hasValue()
4795                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4796                   : UndefValue::get(V.getType());
4797     if (C) {
4798       auto PredForReturned =
4799           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4800             // We can replace the AssociatedValue with the constant.
4801             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4802               return true;
4803 
4804             for (ReturnInst *RI : RetInsts) {
4805               if (RI->getFunction() != getAnchorScope())
4806                 continue;
4807               auto *RC = C;
4808               if (RC->getType() != RI->getReturnValue()->getType())
4809                 RC = ConstantExpr::getBitCast(RC,
4810                                               RI->getReturnValue()->getType());
4811               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4812                                 << " in " << *RI << " :: " << *this << "\n");
4813               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4814                 Changed = ChangeStatus::CHANGED;
4815             }
4816             return true;
4817           };
4818       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4819     }
4820 
4821     return Changed | AAValueSimplify::manifest(A);
4822   }
4823 
4824   /// See AbstractAttribute::trackStatistics()
4825   void trackStatistics() const override {
4826     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4827   }
4828 };
4829 
4830 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4831   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4832       : AAValueSimplifyImpl(IRP, A) {}
4833 
4834   /// See AbstractAttribute::initialize(...).
4835   void initialize(Attributor &A) override {
4836     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4837     //        Needs investigation.
4838     // AAValueSimplifyImpl::initialize(A);
4839     Value &V = getAnchorValue();
4840 
4841     // TODO: Add other cases.
4842     if (isa<Constant>(V))
4843       indicatePessimisticFixpoint();
4844   }
4845 
4846   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4847   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4848   /// Return true if successful; in that case SimplifiedAssociatedValue will be
4849   /// updated and \p Changed is set appropriately.
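  ///
  /// As an illustrative (assumed) example,
  ///   %c = icmp eq i8* %p, null
  /// simplifies to `i1 false` once %p is assumed non-null, while the `ne`
  /// variant simplifies to `i1 true`.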
4850   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4851                               ChangeStatus &Changed) {
4852     if (!ICmp)
4853       return false;
4854     if (!ICmp->isEquality())
4855       return false;
4856 
4857     // This is a comparison with == or !=. We check for nullptr now.
4858     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4859     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4860     if (!Op0IsNull && !Op1IsNull)
4861       return false;
4862 
4863     LLVMContext &Ctx = ICmp->getContext();
4864     // Check for `nullptr ==/!= nullptr` first:
4865     if (Op0IsNull && Op1IsNull) {
4866       Value *NewVal = ConstantInt::get(
4867           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4868       assert(!SimplifiedAssociatedValue.hasValue() &&
4869              "Did not expect non-fixed value for constant comparison");
4870       SimplifiedAssociatedValue = NewVal;
4871       indicateOptimisticFixpoint();
4872       Changed = ChangeStatus::CHANGED;
4873       return true;
4874     }
4875 
4876     // What remains is the nullptr ==/!= non-nullptr case. We'll use AANonNull
4877     // on the non-nullptr operand; if we can assume it is non-null we can
4878     // conclude the result of the comparison.
4879     assert((Op0IsNull || Op1IsNull) &&
4880            "Expected nullptr versus non-nullptr comparison at this point");
4881 
4882     // The index of the operand that we assume is not null.
4883     unsigned PtrIdx = Op0IsNull;
4884     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4885         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4886         DepClassTy::REQUIRED);
4887     if (!PtrNonNullAA.isAssumedNonNull())
4888       return false;
4889 
4890     // The new value depends on the predicate, true for != and false for ==.
4891     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4892                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4893 
4894     assert((!SimplifiedAssociatedValue.hasValue() ||
4895             SimplifiedAssociatedValue == NewVal) &&
4896            "Did not expect to change value for zero-comparison");
4897 
4898     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4899     SimplifiedAssociatedValue = NewVal;
4900 
4901     if (PtrNonNullAA.isKnownNonNull())
4902       indicateOptimisticFixpoint();
4903 
4904     Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus ::CHANGED;
4905     return true;
4906   }
4907 
4908   /// See AbstractAttribute::updateImpl(...).
4909   ChangeStatus updateImpl(Attributor &A) override {
4910     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4911 
4912     ChangeStatus Changed;
4913     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4914                                Changed))
4915       return Changed;
4916 
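    // Visit all values the associated value could stand for; each visited
    // value has to agree with (or refine) the current simplification candidate
    // via checkAndUpdate, otherwise the traversal fails.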
4917     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4918                             bool Stripped) -> bool {
4919       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V),
4920                                              DepClassTy::REQUIRED);
4921       if (!Stripped && this == &AA) {
4922         // TODO: Look the instruction and check recursively.
4923 
4924         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4925                           << "\n");
4926         return false;
4927       }
4928       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4929     };
4930 
4931     bool Dummy = false;
4932     if (!genericValueTraversal<AAValueSimplify, bool>(
4933             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4934             /* UseValueSimplify */ false))
4935       if (!askSimplifiedValueForOtherAAs(A))
4936         return indicatePessimisticFixpoint();
4937 
4938     // If a candidate was found in this update, return CHANGED.
4940     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4941                ? ChangeStatus::UNCHANGED
4942                : ChangeStatus ::CHANGED;
4943   }
4944 
4945   /// See AbstractAttribute::trackStatistics()
4946   void trackStatistics() const override {
4947     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4948   }
4949 };
4950 
4951 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4952   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4953       : AAValueSimplifyImpl(IRP, A) {}
4954 
4955   /// See AbstractAttribute::initialize(...).
4956   void initialize(Attributor &A) override {
4957     SimplifiedAssociatedValue = &getAnchorValue();
4958     indicateOptimisticFixpoint();
4959   }
4960   /// See AbstractAttribute::updateImpl(...).
4961   ChangeStatus updateImpl(Attributor &A) override {
4962     llvm_unreachable(
4963         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4964   }
4965   /// See AbstractAttribute::trackStatistics()
4966   void trackStatistics() const override {
4967     STATS_DECLTRACK_FN_ATTR(value_simplify)
4968   }
4969 };
4970 
4971 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4972   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4973       : AAValueSimplifyFunction(IRP, A) {}
4974   /// See AbstractAttribute::trackStatistics()
4975   void trackStatistics() const override {
4976     STATS_DECLTRACK_CS_ATTR(value_simplify)
4977   }
4978 };
4979 
4980 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4981   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4982       : AAValueSimplifyReturned(IRP, A) {}
4983 
4984   /// See AbstractAttribute::manifest(...).
4985   ChangeStatus manifest(Attributor &A) override {
4986     return AAValueSimplifyImpl::manifest(A);
4987   }
4988 
4989   void trackStatistics() const override {
4990     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4991   }
4992 };
4993 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4994   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4995       : AAValueSimplifyFloating(IRP, A) {}
4996 
4997   /// See AbstractAttribute::manifest(...).
4998   ChangeStatus manifest(Attributor &A) override {
4999     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5000 
5001     if (SimplifiedAssociatedValue.hasValue() &&
5002         !SimplifiedAssociatedValue.getValue())
5003       return Changed;
5004 
5005     Value &V = getAssociatedValue();
5006     auto *C = SimplifiedAssociatedValue.hasValue()
5007                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
5008                   : UndefValue::get(V.getType());
5009     if (C) {
5010       Use &U = cast<CallBase>(&getAnchorValue())
5011                    ->getArgOperandUse(getCallSiteArgNo());
5012       // We can replace the AssociatedValue with the constant.
5013       if (&V != C && V.getType() == C->getType()) {
5014         if (A.changeUseAfterManifest(U, *C))
5015           Changed = ChangeStatus::CHANGED;
5016       }
5017     }
5018 
5019     return Changed | AAValueSimplify::manifest(A);
5020   }
5021 
5022   void trackStatistics() const override {
5023     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5024   }
5025 };
5026 
5027 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5028 struct AAHeapToStackImpl : public AAHeapToStack {
5029   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
5030       : AAHeapToStack(IRP, A) {}
5031 
5032   const std::string getAsStr() const override {
5033     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
5034   }
5035 
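  /// See AbstractAttribute::manifest(...).
  ///
  /// As an illustrative (assumed) example of the rewrite performed below,
  ///   %p = call i8* @malloc(i64 32)    ; with all associated frees removable
  /// becomes
  ///   %p = alloca i8, i64 32
  /// and calloc-like allocations are additionally zero-initialized via a
  /// memset intrinsic.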
5036   ChangeStatus manifest(Attributor &A) override {
5037     assert(getState().isValidState() &&
5038            "Attempted to manifest an invalid state!");
5039 
5040     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5041     Function *F = getAnchorScope();
5042     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5043 
5044     for (Instruction *MallocCall : MallocCalls) {
5045       // This malloc cannot be replaced.
5046       if (BadMallocCalls.count(MallocCall))
5047         continue;
5048 
5049       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5050         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5051         A.deleteAfterManifest(*FreeCall);
5052         HasChanged = ChangeStatus::CHANGED;
5053       }
5054 
5055       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5056                         << "\n");
5057 
5058       Align Alignment;
5059       Value *Size;
5060       if (isCallocLikeFn(MallocCall, TLI)) {
5061         auto *Num = MallocCall->getOperand(0);
5062         auto *SizeT = MallocCall->getOperand(1);
5063         IRBuilder<> B(MallocCall);
5064         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5065       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5066         Size = MallocCall->getOperand(1);
5067         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5068                                    ->getValue()
5069                                    .getZExtValue())
5070                         .valueOrOne();
5071       } else {
5072         Size = MallocCall->getOperand(0);
5073       }
5074 
5075       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5076       Instruction *AI =
5077           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5078                          "", MallocCall->getNextNode());
5079 
5080       if (AI->getType() != MallocCall->getType())
5081         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5082                              AI->getNextNode());
5083 
5084       A.changeValueAfterManifest(*MallocCall, *AI);
5085 
5086       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5087         auto *NBB = II->getNormalDest();
5088         BranchInst::Create(NBB, MallocCall->getParent());
5089         A.deleteAfterManifest(*MallocCall);
5090       } else {
5091         A.deleteAfterManifest(*MallocCall);
5092       }
5093 
5094       // Zero out the allocated memory if it was a calloc.
5095       if (isCallocLikeFn(MallocCall, TLI)) {
5096         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5097                                    AI->getNextNode());
5098         Value *Ops[] = {
5099             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5100             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5101 
5102         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5103         Module *M = F->getParent();
5104         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5105         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5106       }
5107       HasChanged = ChangeStatus::CHANGED;
5108     }
5109 
5110     return HasChanged;
5111   }
5112 
5113   /// Collection of all malloc calls in a function.
5114   SmallSetVector<Instruction *, 4> MallocCalls;
5115 
5116   /// Collection of malloc calls that cannot be converted.
5117   DenseSet<const Instruction *> BadMallocCalls;
5118 
5119   /// A map for each malloc call to the set of associated free calls.
5120   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5121 
5122   ChangeStatus updateImpl(Attributor &A) override;
5123 };
5124 
5125 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5126   const Function *F = getAnchorScope();
5127   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5128 
5129   MustBeExecutedContextExplorer &Explorer =
5130       A.getInfoCache().getMustBeExecutedContextExplorer();
5131 
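  // FreeCheck requires a unique free call for the allocation that is found in
  // the must-be-executed context following the allocation, i.e., whenever the
  // allocation is executed its free is executed as well.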
5132   auto FreeCheck = [&](Instruction &I) {
5133     const auto &Frees = FreesForMalloc.lookup(&I);
5134     if (Frees.size() != 1)
5135       return false;
5136     Instruction *UniqueFree = *Frees.begin();
5137     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5138   };
5139 
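  // UsesCheck conservatively follows all (transitive) uses of the allocation:
  // loads, stores into the allocated memory, lifetime markers, and frees of
  // the allocation itself (not merged through PHIs/selects) are fine; other
  // call site arguments must be assumed nocapture and nofree; GEPs, bitcasts,
  // PHIs, and selects are followed; anything else invalidates the conversion.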
5140   auto UsesCheck = [&](Instruction &I) {
5141     bool ValidUsesOnly = true;
5142     bool MustUse = true;
5143     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5144       Instruction *UserI = cast<Instruction>(U.getUser());
5145       if (isa<LoadInst>(UserI))
5146         return true;
5147       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5148         if (SI->getValueOperand() == U.get()) {
5149           LLVM_DEBUG(dbgs()
5150                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5151           ValidUsesOnly = false;
5152         } else {
5153           // A store into the malloc'ed memory is fine.
5154         }
5155         return true;
5156       }
5157       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5158         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5159           return true;
5160         // Record the free call for this malloc-like allocation.
5161         if (isFreeCall(UserI, TLI)) {
5162           if (MustUse) {
5163             FreesForMalloc[&I].insert(UserI);
5164           } else {
5165             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5166                               << *UserI << "\n");
5167             ValidUsesOnly = false;
5168           }
5169           return true;
5170         }
5171 
5172         unsigned ArgNo = CB->getArgOperandNo(&U);
5173 
5174         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5175             *this, IRPosition::callsite_argument(*CB, ArgNo),
5176             DepClassTy::REQUIRED);
5177 
5178         // If a callsite argument use is nofree, we are fine.
5179         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5180             *this, IRPosition::callsite_argument(*CB, ArgNo),
5181             DepClassTy::REQUIRED);
5182 
5183         if (!NoCaptureAA.isAssumedNoCapture() ||
5184             !ArgNoFreeAA.isAssumedNoFree()) {
5185           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5186           ValidUsesOnly = false;
5187         }
5188         return true;
5189       }
5190 
5191       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5192           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5193         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5194         Follow = true;
5195         return true;
5196       }
5197       // Unknown user for which we cannot track uses further (in a way that
5198       // makes sense).
5199       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5200       ValidUsesOnly = false;
5201       return true;
5202     };
5203     A.checkForAllUses(Pred, *this, I);
5204     return ValidUsesOnly;
5205   };
5206 
5207   auto MallocCallocCheck = [&](Instruction &I) {
5208     if (BadMallocCalls.count(&I))
5209       return true;
5210 
5211     bool IsMalloc = isMallocLikeFn(&I, TLI);
5212     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5213     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5214     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5215       BadMallocCalls.insert(&I);
5216       return true;
5217     }
5218 
5219     if (IsMalloc) {
5220       if (MaxHeapToStackSize == -1) {
5221         if (UsesCheck(I) || FreeCheck(I)) {
5222           MallocCalls.insert(&I);
5223           return true;
5224         }
5225       }
5226       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5227         if (Size->getValue().ule(MaxHeapToStackSize))
5228           if (UsesCheck(I) || FreeCheck(I)) {
5229             MallocCalls.insert(&I);
5230             return true;
5231           }
5232     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5233       if (MaxHeapToStackSize == -1) {
5234         if (UsesCheck(I) || FreeCheck(I)) {
5235           MallocCalls.insert(&I);
5236           return true;
5237         }
5238       }
5239       // Only if the alignment and sizes are constant.
5240       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5241         if (Size->getValue().ule(MaxHeapToStackSize))
5242           if (UsesCheck(I) || FreeCheck(I)) {
5243             MallocCalls.insert(&I);
5244             return true;
5245           }
5246     } else if (IsCalloc) {
5247       if (MaxHeapToStackSize == -1) {
5248         if (UsesCheck(I) || FreeCheck(I)) {
5249           MallocCalls.insert(&I);
5250           return true;
5251         }
5252       }
5253       bool Overflow = false;
5254       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5255         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5256           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5257                   .ule(MaxHeapToStackSize))
5258             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5259               MallocCalls.insert(&I);
5260               return true;
5261             }
5262     }
5263 
5264     BadMallocCalls.insert(&I);
5265     return true;
5266   };
5267 
5268   size_t NumBadMallocs = BadMallocCalls.size();
5269 
5270   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5271 
5272   if (NumBadMallocs != BadMallocCalls.size())
5273     return ChangeStatus::CHANGED;
5274 
5275   return ChangeStatus::UNCHANGED;
5276 }
5277 
5278 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5279   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5280       : AAHeapToStackImpl(IRP, A) {}
5281 
5282   /// See AbstractAttribute::trackStatistics().
5283   void trackStatistics() const override {
5284     STATS_DECL(
5285         MallocCalls, Function,
5286         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5287     for (auto *C : MallocCalls)
5288       if (!BadMallocCalls.count(C))
5289         ++BUILD_STAT_NAME(MallocCalls, Function);
5290   }
5291 };
5292 
5293 /// ----------------------- Privatizable Pointers ------------------------------
5294 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5295   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5296       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5297 
5298   ChangeStatus indicatePessimisticFixpoint() override {
5299     AAPrivatizablePtr::indicatePessimisticFixpoint();
5300     PrivatizableType = nullptr;
5301     return ChangeStatus::CHANGED;
5302   }
5303 
5304   /// Identify the type we can choose for a private copy of the underlying
5305   /// argument. None means it is not clear yet, nullptr means there is none.
5306   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5307 
5308   /// Return a privatizable type that encloses both T0 and T1.
5309   /// TODO: This is merely a stub for now as we should manage a mapping as well.
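  ///
  /// With the current stub, combining None with T yields T, combining T with
  /// itself yields T, and combining two different types yields nullptr (no
  /// common enclosing type).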
5310   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5311     if (!T0.hasValue())
5312       return T1;
5313     if (!T1.hasValue())
5314       return T0;
5315     if (T0 == T1)
5316       return T0;
5317     return nullptr;
5318   }
5319 
5320   Optional<Type *> getPrivatizableType() const override {
5321     return PrivatizableType;
5322   }
5323 
5324   const std::string getAsStr() const override {
5325     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5326   }
5327 
5328 protected:
5329   Optional<Type *> PrivatizableType;
5330 };
5331 
5332 // TODO: Do this for call site arguments (probably also other values) as well.
5333 
5334 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5335   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5336       : AAPrivatizablePtrImpl(IRP, A) {}
5337 
5338   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5339   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5340     // If this is a byval argument and we know all the call sites (so we can
5341     // rewrite them), there is no need to check them explicitly.
5342     bool AllCallSitesKnown;
5343     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5344         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5345                                true, AllCallSitesKnown))
5346       return getAssociatedValue().getType()->getPointerElementType();
5347 
5348     Optional<Type *> Ty;
5349     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5350 
5351     // Make sure the associated call site argument has the same type at all call
5352     // sites and that it is an allocation we know is safe to privatize; for now
5353     // that means we only allow alloca instructions.
5354     // TODO: We can additionally analyze the accesses in the callee to create
5355     //       the type from that information instead. That is a little more
5356     //       involved and will be done in a follow-up patch.
5357     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5358       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5359       // Check if a corresponding argument was found or if it is one not
5360       // associated (which can happen for callback calls).
5361       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5362         return false;
5363 
5364       // Check that all call sites agree on a type.
5365       auto &PrivCSArgAA =
5366           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5367       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5368 
5369       LLVM_DEBUG({
5370         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5371         if (CSTy.hasValue() && CSTy.getValue())
5372           CSTy.getValue()->print(dbgs());
5373         else if (CSTy.hasValue())
5374           dbgs() << "<nullptr>";
5375         else
5376           dbgs() << "<none>";
5377       });
5378 
5379       Ty = combineTypes(Ty, CSTy);
5380 
5381       LLVM_DEBUG({
5382         dbgs() << " : New Type: ";
5383         if (Ty.hasValue() && Ty.getValue())
5384           Ty.getValue()->print(dbgs());
5385         else if (Ty.hasValue())
5386           dbgs() << "<nullptr>";
5387         else
5388           dbgs() << "<none>";
5389         dbgs() << "\n";
5390       });
5391 
5392       return !Ty.hasValue() || Ty.getValue();
5393     };
5394 
5395     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5396       return nullptr;
5397     return Ty;
5398   }
5399 
5400   /// See AbstractAttribute::updateImpl(...).
5401   ChangeStatus updateImpl(Attributor &A) override {
5402     PrivatizableType = identifyPrivatizableType(A);
5403     if (!PrivatizableType.hasValue())
5404       return ChangeStatus::UNCHANGED;
5405     if (!PrivatizableType.getValue())
5406       return indicatePessimisticFixpoint();
5407 
5408     // The dependence is optional so that we don't give up on privatization
5409     // once we give up on the alignment.
5410     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5411                         DepClassTy::OPTIONAL);
5412 
5413     // Avoid arguments with padding for now.
5414     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5415         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5416                                                 A.getInfoCache().getDL())) {
5417       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5418       return indicatePessimisticFixpoint();
5419     }
5420 
5421     // Verify callee and caller agree on how the promoted argument would be
5422     // passed.
5423     // TODO: The use of the ArgumentPromotion interface here is ugly; we need a
5424     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5425     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5426     Function &Fn = *getIRPosition().getAnchorScope();
5427     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5428     ArgsToPromote.insert(getAssociatedArgument());
5429     const auto *TTI =
5430         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5431     if (!TTI ||
5432         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5433             Fn, *TTI, ArgsToPromote, Dummy) ||
5434         ArgsToPromote.empty()) {
5435       LLVM_DEBUG(
5436           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5437                  << Fn.getName() << "\n");
5438       return indicatePessimisticFixpoint();
5439     }
5440 
5441     // Collect the types that will replace the privatizable type in the function
5442     // signature.
5443     SmallVector<Type *, 16> ReplacementTypes;
5444     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5445 
5446     // Register a rewrite of the argument.
5447     Argument *Arg = getAssociatedArgument();
5448     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5449       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5450       return indicatePessimisticFixpoint();
5451     }
5452 
5453     unsigned ArgNo = Arg->getArgNo();
5454 
5455     // Helper to check if, for the given call site, the associated argument is
5456     // passed to a callback where the privatization would be different.
5457     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5458       SmallVector<const Use *, 4> CallbackUses;
5459       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5460       for (const Use *U : CallbackUses) {
5461         AbstractCallSite CBACS(U);
5462         assert(CBACS && CBACS.isCallbackCall());
5463         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5464           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5465 
5466           LLVM_DEBUG({
5467             dbgs()
5468                 << "[AAPrivatizablePtr] Argument " << *Arg
5469                 << " check if it can be privatized in the context of its parent ("
5470                 << Arg->getParent()->getName()
5471                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5472                    "callback ("
5473                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5474                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5475                 << CBACS.getCallArgOperand(CBArg) << " vs "
5476                 << CB.getArgOperand(ArgNo) << "\n"
5477                 << "[AAPrivatizablePtr] " << CBArg << " : "
5478                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5479           });
5480 
5481           if (CBArgNo != int(ArgNo))
5482             continue;
5483           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5484               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5485           if (CBArgPrivAA.isValidState()) {
5486             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5487             if (!CBArgPrivTy.hasValue())
5488               continue;
5489             if (CBArgPrivTy.getValue() == PrivatizableType)
5490               continue;
5491           }
5492 
5493           LLVM_DEBUG({
5494             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5495                    << " cannot be privatized in the context of its parent ("
5496                    << Arg->getParent()->getName()
5497                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5498                       "callback ("
5499                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5500                    << ").\n[AAPrivatizablePtr] for which the argument "
5501                       "privatization is not compatible.\n";
5502           });
5503           return false;
5504         }
5505       }
5506       return true;
5507     };
5508 
5509     // Helper to check if, for the given call site, the associated argument is
5510     // passed to a direct call where the privatization would be different.
5511     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5512       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5513       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5514       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5515              "Expected a direct call operand for callback call operand");
5516 
5517       LLVM_DEBUG({
5518         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5519                << " check if it can be privatized in the context of its parent ("
5520                << Arg->getParent()->getName()
5521                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5522                   "direct call of ("
5523                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5524                << ").\n";
5525       });
5526 
5527       Function *DCCallee = DC->getCalledFunction();
5528       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5529         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5530             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5531             DepClassTy::REQUIRED);
5532         if (DCArgPrivAA.isValidState()) {
5533           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5534           if (!DCArgPrivTy.hasValue())
5535             return true;
5536           if (DCArgPrivTy.getValue() == PrivatizableType)
5537             return true;
5538         }
5539       }
5540 
5541       LLVM_DEBUG({
5542         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5543                << " cannot be privatized in the context of its parent ("
5544                << Arg->getParent()->getName()
5545                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5546                   "direct call of ("
5547                << ACS.getInstruction()->getCalledFunction()->getName()
5548                << ").\n[AAPrivatizablePtr] for which the argument "
5549                   "privatization is not compatible.\n";
5550       });
5551       return false;
5552     };
5553 
5554     // Helper to check if the associated argument is used at the given abstract
5555     // call site in a way that is incompatible with the privatization assumed
5556     // here.
5557     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5558       if (ACS.isDirectCall())
5559         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5560       if (ACS.isCallbackCall())
5561         return IsCompatiblePrivArgOfDirectCS(ACS);
5562       return false;
5563     };
5564 
5565     bool AllCallSitesKnown;
5566     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5567                                 AllCallSitesKnown))
5568       return indicatePessimisticFixpoint();
5569 
5570     return ChangeStatus::UNCHANGED;
5571   }
5572 
5573   /// Given a type to privatize, \p PrivType, collect the constituent types
5574   /// (which are used) in \p ReplacementTypes.
5575   static void
5576   identifyReplacementTypes(Type *PrivType,
5577                            SmallVectorImpl<Type *> &ReplacementTypes) {
5578     // TODO: For now we expand the privatization type to the fullest which can
5579     //       lead to dead arguments that need to be removed later.
5580     assert(PrivType && "Expected privatizable type!");
5581 
5582     // Traverse the type, extract constituent types on the outermost level.
5583     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5584       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5585         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5586     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5587       ReplacementTypes.append(PrivArrayType->getNumElements(),
5588                               PrivArrayType->getElementType());
5589     } else {
5590       ReplacementTypes.push_back(PrivType);
5591     }
5592   }
5593 
5594   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5595   /// The values needed are taken from the arguments of \p F starting at
5596   /// position \p ArgNo.
5597   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5598                                    unsigned ArgNo, Instruction &IP) {
5599     assert(PrivType && "Expected privatizable type!");
5600 
5601     IRBuilder<NoFolder> IRB(&IP);
5602     const DataLayout &DL = F.getParent()->getDataLayout();
5603 
5604     // Traverse the type, build GEPs and stores.
5605     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5606       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5607       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5608         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5609         Value *Ptr = constructPointer(
5610             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5611         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5612       }
5613     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5614       Type *PointeeTy = PrivArrayType->getElementType();
5615       Type *PointeePtrTy = PointeeTy->getPointerTo();
5616       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5617       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5618         Value *Ptr =
5619             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5620         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5621       }
5622     } else {
5623       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5624     }
5625   }
5626 
5627   /// Extract values from \p Base according to the type \p PrivType at the
5628   /// call position \p ACS. The values are appended to \p ReplacementValues.
5629   void createReplacementValues(Align Alignment, Type *PrivType,
5630                                AbstractCallSite ACS, Value *Base,
5631                                SmallVectorImpl<Value *> &ReplacementValues) {
5632     assert(Base && "Expected base value!");
5633     assert(PrivType && "Expected privatizable type!");
5634     Instruction *IP = ACS.getInstruction();
5635 
5636     IRBuilder<NoFolder> IRB(IP);
5637     const DataLayout &DL = IP->getModule()->getDataLayout();
5638 
5639     if (Base->getType()->getPointerElementType() != PrivType)
5640       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5641                                                  "", ACS.getInstruction());
5642 
5643     // Traverse the type, build GEPs and loads.
5644     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5645       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5646       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5647         Type *PointeeTy = PrivStructType->getElementType(u);
5648         Value *Ptr =
5649             constructPointer(PointeeTy->getPointerTo(), Base,
5650                              PrivStructLayout->getElementOffset(u), IRB, DL);
5651         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5652         L->setAlignment(Alignment);
5653         ReplacementValues.push_back(L);
5654       }
5655     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5656       Type *PointeeTy = PrivArrayType->getElementType();
5657       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5658       Type *PointeePtrTy = PointeeTy->getPointerTo();
5659       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5660         Value *Ptr =
5661             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5662         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5663         L->setAlignment(Alignment);
5664         ReplacementValues.push_back(L);
5665       }
5666     } else {
5667       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5668       L->setAlignment(Alignment);
5669       ReplacementValues.push_back(L);
5670     }
5671   }
5672 
5673   /// See AbstractAttribute::manifest(...)
5674   ChangeStatus manifest(Attributor &A) override {
5675     if (!PrivatizableType.hasValue())
5676       return ChangeStatus::UNCHANGED;
5677     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5678 
5679     // Collect all tail calls in the function as we cannot allow new allocas to
5680     // escape into tail recursion.
5681     // TODO: Be smarter about new allocas escaping into tail calls.
5682     SmallVector<CallInst *, 16> TailCalls;
5683     if (!A.checkForAllInstructions(
5684             [&](Instruction &I) {
5685               CallInst &CI = cast<CallInst>(I);
5686               if (CI.isTailCall())
5687                 TailCalls.push_back(&CI);
5688               return true;
5689             },
5690             *this, {Instruction::Call}))
5691       return ChangeStatus::UNCHANGED;
5692 
5693     Argument *Arg = getAssociatedArgument();
5694     // Query the AAAlign attribute for the alignment of the associated argument
5695     // to determine the best alignment of the loads.
5696     const auto &AlignAA =
5697         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5698 
5699     // Callback to repair the associated function. A new alloca is placed at the
5700     // beginning and initialized with the values passed through arguments. The
5701     // new alloca replaces all uses of the old pointer argument.
5702     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5703         [=](const Attributor::ArgumentReplacementInfo &ARI,
5704             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5705           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5706           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5707           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5708                                            Arg->getName() + ".priv", IP);
5709           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5710                                ArgIt->getArgNo(), *IP);
5711 
5712           if (AI->getType() != Arg->getType())
5713             AI =
5714                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5715           Arg->replaceAllUsesWith(AI);
5716 
5717           for (CallInst *CI : TailCalls)
5718             CI->setTailCall(false);
5719         };
5720 
5721     // Callback to repair a call site of the associated function. The elements
5722     // of the privatizable type are loaded prior to the call and passed to the
5723     // new function version.
5724     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5725         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5726                       AbstractCallSite ACS,
5727                       SmallVectorImpl<Value *> &NewArgOperands) {
5728           // When no alignment is specified for the load instruction,
5729           // natural alignment is assumed.
5730           createReplacementValues(
5731               assumeAligned(AlignAA.getAssumedAlign()),
5732               PrivatizableType.getValue(), ACS,
5733               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5734               NewArgOperands);
5735         };
5736 
5737     // Collect the types that will replace the privatizable type in the function
5738     // signature.
5739     SmallVector<Type *, 16> ReplacementTypes;
5740     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5741 
5742     // Register a rewrite of the argument.
5743     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5744                                            std::move(FnRepairCB),
5745                                            std::move(ACSRepairCB)))
5746       return ChangeStatus::CHANGED;
5747     return ChangeStatus::UNCHANGED;
5748   }
5749 
5750   /// See AbstractAttribute::trackStatistics()
5751   void trackStatistics() const override {
5752     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5753   }
5754 };
5755 
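/// Privatizable pointer attribute for a floating value.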
5756 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5757   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5758       : AAPrivatizablePtrImpl(IRP, A) {}
5759 
5760   /// See AbstractAttribute::initialize(...).
5761   virtual void initialize(Attributor &A) override {
5762     // TODO: We can privatize more than arguments.
5763     indicatePessimisticFixpoint();
5764   }
5765 
5766   ChangeStatus updateImpl(Attributor &A) override {
5767     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5768                      "updateImpl will not be called");
5769   }
5770 
5771   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5772   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5773     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5774     if (!Obj) {
5775       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5776       return nullptr;
5777     }
5778 
5779     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5780       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5781         if (CI->isOne())
5782           return Obj->getType()->getPointerElementType();
5783     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5784       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5785           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5786       if (PrivArgAA.isAssumedPrivatizablePtr())
5787         return Obj->getType()->getPointerElementType();
5788     }
5789 
5790     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5791                          "alloca nor privatizable argument: "
5792                       << *Obj << "!\n");
5793     return nullptr;
5794   }
5795 
5796   /// See AbstractAttribute::trackStatistics()
5797   void trackStatistics() const override {
5798     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5799   }
5800 };
5801 
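/// Privatizable pointer attribute for a call site argument.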
5802 struct AAPrivatizablePtrCallSiteArgument final
5803     : public AAPrivatizablePtrFloating {
5804   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5805       : AAPrivatizablePtrFloating(IRP, A) {}
5806 
5807   /// See AbstractAttribute::initialize(...).
5808   void initialize(Attributor &A) override {
5809     if (getIRPosition().hasAttr(Attribute::ByVal))
5810       indicateOptimisticFixpoint();
5811   }
5812 
5813   /// See AbstractAttribute::updateImpl(...).
5814   ChangeStatus updateImpl(Attributor &A) override {
5815     PrivatizableType = identifyPrivatizableType(A);
5816     if (!PrivatizableType.hasValue())
5817       return ChangeStatus::UNCHANGED;
5818     if (!PrivatizableType.getValue())
5819       return indicatePessimisticFixpoint();
5820 
5821     const IRPosition &IRP = getIRPosition();
5822     auto &NoCaptureAA =
5823         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5824     if (!NoCaptureAA.isAssumedNoCapture()) {
5825       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5826       return indicatePessimisticFixpoint();
5827     }
5828 
5829     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5830     if (!NoAliasAA.isAssumedNoAlias()) {
5831       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5832       return indicatePessimisticFixpoint();
5833     }
5834 
5835     const auto &MemBehaviorAA =
5836         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5837     if (!MemBehaviorAA.isAssumedReadOnly()) {
5838       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5839       return indicatePessimisticFixpoint();
5840     }
5841 
5842     return ChangeStatus::UNCHANGED;
5843   }
5844 
5845   /// See AbstractAttribute::trackStatistics()
5846   void trackStatistics() const override {
5847     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5848   }
5849 };
5850 
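/// Privatizable pointer attribute for a call site return value.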
5851 struct AAPrivatizablePtrCallSiteReturned final
5852     : public AAPrivatizablePtrFloating {
5853   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5854       : AAPrivatizablePtrFloating(IRP, A) {}
5855 
5856   /// See AbstractAttribute::initialize(...).
5857   void initialize(Attributor &A) override {
5858     // TODO: We can privatize more than arguments.
5859     indicatePessimisticFixpoint();
5860   }
5861 
5862   /// See AbstractAttribute::trackStatistics()
5863   void trackStatistics() const override {
5864     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5865   }
5866 };
5867 
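/// Privatizable pointer attribute for the return value of a function.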
5868 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5869   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5870       : AAPrivatizablePtrFloating(IRP, A) {}
5871 
5872   /// See AbstractAttribute::initialize(...).
5873   void initialize(Attributor &A) override {
5874     // TODO: We can privatize more than arguments.
5875     indicatePessimisticFixpoint();
5876   }
5877 
5878   /// See AbstractAttribute::trackStatistics()
5879   void trackStatistics() const override {
5880     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5881   }
5882 };
5883 
5884 /// -------------------- Memory Behavior Attributes ----------------------------
5885 /// Includes read-none, read-only, and write-only.
5886 /// ----------------------------------------------------------------------------
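/// Base implementation shared by all memory behavior attributes.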
5887 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5888   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5889       : AAMemoryBehavior(IRP, A) {}
5890 
5891   /// See AbstractAttribute::initialize(...).
5892   void initialize(Attributor &A) override {
5893     intersectAssumedBits(BEST_STATE);
5894     getKnownStateFromValue(getIRPosition(), getState());
5895     AAMemoryBehavior::initialize(A);
5896   }
5897 
5898   /// Return the memory behavior information encoded in the IR for \p IRP.
5899   static void getKnownStateFromValue(const IRPosition &IRP,
5900                                      BitIntegerState &State,
5901                                      bool IgnoreSubsumingPositions = false) {
5902     SmallVector<Attribute, 2> Attrs;
5903     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5904     for (const Attribute &Attr : Attrs) {
5905       switch (Attr.getKindAsEnum()) {
5906       case Attribute::ReadNone:
5907         State.addKnownBits(NO_ACCESSES);
5908         break;
5909       case Attribute::ReadOnly:
5910         State.addKnownBits(NO_WRITES);
5911         break;
5912       case Attribute::WriteOnly:
5913         State.addKnownBits(NO_READS);
5914         break;
5915       default:
5916         llvm_unreachable("Unexpected attribute!");
5917       }
5918     }
5919 
5920     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5921       if (!I->mayReadFromMemory())
5922         State.addKnownBits(NO_READS);
5923       if (!I->mayWriteToMemory())
5924         State.addKnownBits(NO_WRITES);
5925     }
5926   }
5927 
5928   /// See AbstractAttribute::getDeducedAttributes(...).
5929   void getDeducedAttributes(LLVMContext &Ctx,
5930                             SmallVectorImpl<Attribute> &Attrs) const override {
5931     assert(Attrs.size() == 0);
5932     if (isAssumedReadNone())
5933       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5934     else if (isAssumedReadOnly())
5935       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5936     else if (isAssumedWriteOnly())
5937       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5938     assert(Attrs.size() <= 1);
5939   }
5940 
5941   /// See AbstractAttribute::manifest(...).
5942   ChangeStatus manifest(Attributor &A) override {
5943     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5944       return ChangeStatus::UNCHANGED;
5945 
5946     const IRPosition &IRP = getIRPosition();
5947 
5948     // Check if we would improve the existing attributes first.
5949     SmallVector<Attribute, 4> DeducedAttrs;
5950     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5951     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5952           return IRP.hasAttr(Attr.getKindAsEnum(),
5953                              /* IgnoreSubsumingPositions */ true);
5954         }))
5955       return ChangeStatus::UNCHANGED;
5956 
5957     // Clear existing attributes.
5958     IRP.removeAttrs(AttrKinds);
5959 
5960     // Use the generic manifest method.
5961     return IRAttribute::manifest(A);
5962   }
5963 
5964   /// See AbstractState::getAsStr().
5965   const std::string getAsStr() const override {
5966     if (isAssumedReadNone())
5967       return "readnone";
5968     if (isAssumedReadOnly())
5969       return "readonly";
5970     if (isAssumedWriteOnly())
5971       return "writeonly";
5972     return "may-read/write";
5973   }
5974 
5975   /// The set of IR attributes AAMemoryBehavior deals with.
5976   static const Attribute::AttrKind AttrKinds[3];
5977 };
5978 
5979 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5980     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5981 
5982 /// Memory behavior attribute for a floating value.
5983 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5984   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5985       : AAMemoryBehaviorImpl(IRP, A) {}
5986 
5987   /// See AbstractAttribute::initialize(...).
5988   void initialize(Attributor &A) override {
5989     AAMemoryBehaviorImpl::initialize(A);
5990     addUsesOf(A, getAssociatedValue());
5991   }
5992 
5993   /// See AbstractAttribute::updateImpl(...).
5994   ChangeStatus updateImpl(Attributor &A) override;
5995 
5996   /// See AbstractAttribute::trackStatistics()
5997   void trackStatistics() const override {
5998     if (isAssumedReadNone())
5999       STATS_DECLTRACK_FLOATING_ATTR(readnone)
6000     else if (isAssumedReadOnly())
6001       STATS_DECLTRACK_FLOATING_ATTR(readonly)
6002     else if (isAssumedWriteOnly())
6003       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6004   }
6005 
6006 private:
6007   /// Return true if users of \p UserI might access the underlying
6008   /// variable/location described by \p U and should therefore be analyzed.
6009   bool followUsersOfUseIn(Attributor &A, const Use *U,
6010                           const Instruction *UserI);
6011 
6012   /// Update the state according to the effect of use \p U in \p UserI.
6013   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
6014 
6015 protected:
6016   /// Add the uses of \p V to the `Uses` set we look at during the update step.
6017   void addUsesOf(Attributor &A, const Value &V);
6018 
6019   /// Container for (transitive) uses of the associated argument.
6020   SmallVector<const Use *, 8> Uses;
6021 
6022   /// Set to remember the uses we already traversed.
6023   SmallPtrSet<const Use *, 8> Visited;
6024 };
6025 
6026 /// Memory behavior attribute for function argument.
6027 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6028   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6029       : AAMemoryBehaviorFloating(IRP, A) {}
6030 
6031   /// See AbstractAttribute::initialize(...).
6032   void initialize(Attributor &A) override {
6033     intersectAssumedBits(BEST_STATE);
6034     const IRPosition &IRP = getIRPosition();
6035     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6036     // can query it when we use has/getAttr. That would allow us to reuse the
6037     // initialize of the base class here.
6038     bool HasByVal =
6039         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6040     getKnownStateFromValue(IRP, getState(),
6041                            /* IgnoreSubsumingPositions */ HasByVal);
6042 
6043     // Initialize the use vector with all direct uses of the associated value.
6044     Argument *Arg = getAssociatedArgument();
6045     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
6046       indicatePessimisticFixpoint();
6047     } else {
6048       addUsesOf(A, *Arg);
6049     }
6050   }
6051 
6052   ChangeStatus manifest(Attributor &A) override {
6053     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6054     if (!getAssociatedValue().getType()->isPointerTy())
6055       return ChangeStatus::UNCHANGED;
6056 
6057     // TODO: From readattrs.ll: "inalloca parameters are always
6058     //                           considered written"
6059     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6060       removeKnownBits(NO_WRITES);
6061       removeAssumedBits(NO_WRITES);
6062     }
6063     return AAMemoryBehaviorFloating::manifest(A);
6064   }
6065 
6066   /// See AbstractAttribute::trackStatistics()
6067   void trackStatistics() const override {
6068     if (isAssumedReadNone())
6069       STATS_DECLTRACK_ARG_ATTR(readnone)
6070     else if (isAssumedReadOnly())
6071       STATS_DECLTRACK_ARG_ATTR(readonly)
6072     else if (isAssumedWriteOnly())
6073       STATS_DECLTRACK_ARG_ATTR(writeonly)
6074   }
6075 };
6076 
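/// Memory behavior attribute for a call site argument.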
6077 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6078   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6079       : AAMemoryBehaviorArgument(IRP, A) {}
6080 
6081   /// See AbstractAttribute::initialize(...).
6082   void initialize(Attributor &A) override {
6083     // If we don't have an associated argument this is either a variadic call
6084     // or an indirect call; either way, there is nothing to do here.
6085     Argument *Arg = getAssociatedArgument();
6086     if (!Arg) {
6087       indicatePessimisticFixpoint();
6088       return;
6089     }
6090     if (Arg->hasByValAttr()) {
6091       addKnownBits(NO_WRITES);
6092       removeKnownBits(NO_READS);
6093       removeAssumedBits(NO_READS);
6094     }
6095     AAMemoryBehaviorArgument::initialize(A);
6096     if (getAssociatedFunction()->isDeclaration())
6097       indicatePessimisticFixpoint();
6098   }
6099 
6100   /// See AbstractAttribute::updateImpl(...).
6101   ChangeStatus updateImpl(Attributor &A) override {
6102     // TODO: Once we have call site specific value information we can provide
6103     //       call site specific liveness information and then it makes
6104     //       sense to specialize attributes for call site arguments instead of
6105     //       redirecting requests to the callee argument.
6106     Argument *Arg = getAssociatedArgument();
6107     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6108     auto &ArgAA =
6109         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6110     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6111   }
6112 
6113   /// See AbstractAttribute::trackStatistics()
6114   void trackStatistics() const override {
6115     if (isAssumedReadNone())
6116       STATS_DECLTRACK_CSARG_ATTR(readnone)
6117     else if (isAssumedReadOnly())
6118       STATS_DECLTRACK_CSARG_ATTR(readonly)
6119     else if (isAssumedWriteOnly())
6120       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6121   }
6122 };
6123 
6124 /// Memory behavior attribute for a call site return position.
6125 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6126   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6127       : AAMemoryBehaviorFloating(IRP, A) {}
6128 
6129   /// See AbstractAttribute::initialize(...).
6130   void initialize(Attributor &A) override {
6131     AAMemoryBehaviorImpl::initialize(A);
6132     Function *F = getAssociatedFunction();
6133     if (!F || F->isDeclaration())
6134       indicatePessimisticFixpoint();
6135   }
6136 
6137   /// See AbstractAttribute::manifest(...).
6138   ChangeStatus manifest(Attributor &A) override {
6139     // We do not annotate returned values.
6140     return ChangeStatus::UNCHANGED;
6141   }
6142 
6143   /// See AbstractAttribute::trackStatistics()
6144   void trackStatistics() const override {}
6145 };
6146 
6147 /// An AA to represent the memory behavior function attributes.
6148 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6149   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6150       : AAMemoryBehaviorImpl(IRP, A) {}
6151 
6152   /// See AbstractAttribute::updateImpl(Attributor &A).
6153   virtual ChangeStatus updateImpl(Attributor &A) override;
6154 
6155   /// See AbstractAttribute::manifest(...).
6156   ChangeStatus manifest(Attributor &A) override {
6157     Function &F = cast<Function>(getAnchorValue());
6158     if (isAssumedReadNone()) {
6159       F.removeFnAttr(Attribute::ArgMemOnly);
6160       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6161       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6162     }
6163     return AAMemoryBehaviorImpl::manifest(A);
6164   }
6165 
6166   /// See AbstractAttribute::trackStatistics()
6167   void trackStatistics() const override {
6168     if (isAssumedReadNone())
6169       STATS_DECLTRACK_FN_ATTR(readnone)
6170     else if (isAssumedReadOnly())
6171       STATS_DECLTRACK_FN_ATTR(readonly)
6172     else if (isAssumedWriteOnly())
6173       STATS_DECLTRACK_FN_ATTR(writeonly)
6174   }
6175 };
6176 
6177 /// AAMemoryBehavior attribute for call sites.
6178 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6179   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6180       : AAMemoryBehaviorImpl(IRP, A) {}
6181 
6182   /// See AbstractAttribute::initialize(...).
6183   void initialize(Attributor &A) override {
6184     AAMemoryBehaviorImpl::initialize(A);
6185     Function *F = getAssociatedFunction();
6186     if (!F || F->isDeclaration())
6187       indicatePessimisticFixpoint();
6188   }
6189 
6190   /// See AbstractAttribute::updateImpl(...).
6191   ChangeStatus updateImpl(Attributor &A) override {
6192     // TODO: Once we have call site specific value information we can provide
6193     //       call site specific liveness information and then it makes
6194     //       sense to specialize attributes for call site arguments instead of
6195     //       redirecting requests to the callee argument.
6196     Function *F = getAssociatedFunction();
6197     const IRPosition &FnPos = IRPosition::function(*F);
6198     auto &FnAA =
6199         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6200     return clampStateAndIndicateChange(getState(), FnAA.getState());
6201   }
6202 
6203   /// See AbstractAttribute::trackStatistics()
6204   void trackStatistics() const override {
6205     if (isAssumedReadNone())
6206       STATS_DECLTRACK_CS_ATTR(readnone)
6207     else if (isAssumedReadOnly())
6208       STATS_DECLTRACK_CS_ATTR(readonly)
6209     else if (isAssumedWriteOnly())
6210       STATS_DECLTRACK_CS_ATTR(writeonly)
6211   }
6212 };
6213 
6214 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6215 
6216   // The current assumed state used to determine a change.
6217   auto AssumedState = getAssumed();
6218 
6219   auto CheckRWInst = [&](Instruction &I) {
6220     // If the instruction has its own memory behavior state, use it to restrict
6221     // the local state. No further analysis is required as the other memory
6222     // state is as optimistic as it gets.
6223     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6224       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6225           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6226       intersectAssumedBits(MemBehaviorAA.getAssumed());
6227       return !isAtFixpoint();
6228     }
6229 
6230     // Remove access kind modifiers if necessary.
6231     if (I.mayReadFromMemory())
6232       removeAssumedBits(NO_READS);
6233     if (I.mayWriteToMemory())
6234       removeAssumedBits(NO_WRITES);
6235     return !isAtFixpoint();
6236   };
6237 
6238   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6239     return indicatePessimisticFixpoint();
6240 
6241   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6242                                         : ChangeStatus::UNCHANGED;
6243 }
6244 
6245 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6246 
6247   const IRPosition &IRP = getIRPosition();
6248   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6249   AAMemoryBehavior::StateType &S = getState();
6250 
6251   // First, check the function scope. We take the known information and we avoid
6252   // work if the assumed information implies the current assumed information for
6253   // this attribute. This is valid for all but byval arguments.
6254   Argument *Arg = IRP.getAssociatedArgument();
6255   AAMemoryBehavior::base_t FnMemAssumedState =
6256       AAMemoryBehavior::StateType::getWorstState();
6257   if (!Arg || !Arg->hasByValAttr()) {
6258     const auto &FnMemAA =
6259         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6260     FnMemAssumedState = FnMemAA.getAssumed();
6261     S.addKnownBits(FnMemAA.getKnown());
6262     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6263       return ChangeStatus::UNCHANGED;
6264   }
6265 
6266   // Make sure the value is not captured (except through "return"); if
6267   // it is, any information derived would be irrelevant anyway as we cannot
6268   // check the potential aliases introduced by the capture. However, no need
6269   // to fall back to anything less optimistic than the function state.
6270   const auto &ArgNoCaptureAA =
6271       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6272   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6273     S.intersectAssumedBits(FnMemAssumedState);
6274     return ChangeStatus::CHANGED;
6275   }
6276 
6277   // The current assumed state used to determine a change.
6278   auto AssumedState = S.getAssumed();
6279 
6280   // Liveness information to exclude dead users.
6281   // TODO: Take the FnPos once we have call site specific liveness information.
6282   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6283       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6284       DepClassTy::NONE);
6285 
6286   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6287   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6288     const Use *U = Uses[i];
6289     Instruction *UserI = cast<Instruction>(U->getUser());
6290     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6291                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6292                       << "]\n");
6293     if (A.isAssumedDead(*U, this, &LivenessAA))
6294       continue;
6295 
6296     // Droppable users, e.g., llvm::assume, do not actually perform any action.
6297     if (UserI->isDroppable())
6298       continue;
6299 
6300     // Check if the users of UserI should also be visited.
6301     if (followUsersOfUseIn(A, U, UserI))
6302       addUsesOf(A, *UserI);
6303 
6304     // If UserI might touch memory we analyze the use in detail.
6305     if (UserI->mayReadOrWriteMemory())
6306       analyzeUseIn(A, U, UserI);
6307   }
6308 
6309   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6310                                         : ChangeStatus::UNCHANGED;
6311 }
6312 
6313 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6314   SmallVector<const Use *, 8> WL;
6315   for (const Use &U : V.uses())
6316     WL.push_back(&U);
6317 
6318   while (!WL.empty()) {
6319     const Use *U = WL.pop_back_val();
6320     if (!Visited.insert(U).second)
6321       continue;
6322 
6323     const Instruction *UserI = cast<Instruction>(U->getUser());
6324     if (UserI->mayReadOrWriteMemory()) {
6325       Uses.push_back(U);
6326       continue;
6327     }
6328     if (!followUsersOfUseIn(A, U, UserI))
6329       continue;
6330     for (const Use &UU : UserI->uses())
6331       WL.push_back(&UU);
6332   }
6333 }
6334 
6335 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6336                                                   const Instruction *UserI) {
6337   // The loaded value is unrelated to the pointer argument; there is no need to
6338   // follow the users of the load.
6339   if (isa<LoadInst>(UserI))
6340     return false;
6341 
6342   // By default we follow all uses assuming UserI might leak information on U,
6343   // we have special handling for call sites operands though.
6344   const auto *CB = dyn_cast<CallBase>(UserI);
6345   if (!CB || !CB->isArgOperand(U))
6346     return true;
6347 
6348   // If the use is a call argument known not to be captured, the users of
6349   // the call do not need to be visited because they have to be unrelated to
6350   // the input. Note that this check is not trivial even though we disallow
6351   // general capturing of the underlying argument. The reason is that the
6352   // call might capture the argument "through return", which we allow and for
6353   // which we need to check call users.
6354   if (U->get()->getType()->isPointerTy()) {
6355     unsigned ArgNo = CB->getArgOperandNo(U);
6356     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6357         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6358     return !ArgNoCaptureAA.isAssumedNoCapture();
6359   }
6360 
6361   return true;
6362 }
6363 
6364 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6365                                             const Instruction *UserI) {
6366   assert(UserI->mayReadOrWriteMemory());
6367 
6368   switch (UserI->getOpcode()) {
6369   default:
6370     // TODO: Handle all atomics and other side-effect operations we know of.
6371     break;
6372   case Instruction::Load:
6373     // Loads cause the NO_READS property to disappear.
6374     removeAssumedBits(NO_READS);
6375     return;
6376 
6377   case Instruction::Store:
6378     // Stores cause the NO_WRITES property to disappear if the use is the
6379     // pointer operand. Note that we do assume that capturing was taken care of
6380     // somewhere else.
6381     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6382       removeAssumedBits(NO_WRITES);
6383     return;
6384 
6385   case Instruction::Call:
6386   case Instruction::CallBr:
6387   case Instruction::Invoke: {
6388     // For call sites we look at the argument memory behavior attribute (this
6389     // could be recursive!) in order to restrict our own state.
6390     const auto *CB = cast<CallBase>(UserI);
6391 
6392     // Give up on operand bundles.
6393     if (CB->isBundleOperand(U)) {
6394       indicatePessimisticFixpoint();
6395       return;
6396     }
6397 
6398     // Calling a function does read the function pointer, and may write it if
6399     // the function is self-modifying.
6400     if (CB->isCallee(U)) {
6401       removeAssumedBits(NO_READS);
6402       break;
6403     }
6404 
6405     // Adjust the possible access behavior based on the information on the
6406     // argument.
6407     IRPosition Pos;
6408     if (U->get()->getType()->isPointerTy())
6409       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6410     else
6411       Pos = IRPosition::callsite_function(*CB);
6412     const auto &MemBehaviorAA =
6413         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6414     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6415     // and at least "known".
6416     intersectAssumedBits(MemBehaviorAA.getAssumed());
6417     return;
6418   }
6419   };
6420 
6421   // Generally, look at the "may-properties" and adjust the assumed state if we
6422   // did not trigger special handling before.
6423   if (UserI->mayReadFromMemory())
6424     removeAssumedBits(NO_READS);
6425   if (UserI->mayWriteToMemory())
6426     removeAssumedBits(NO_WRITES);
6427 }
6428 
6429 } // namespace
6430 
6431 /// -------------------- Memory Locations Attributes ---------------------------
6432 /// Includes read-none, argmemonly, inaccessiblememonly,
6433 /// inaccessiblemem_or_argmemonly
6434 /// ----------------------------------------------------------------------------
6435 
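/// Return a human readable string for the locations encoded in \p MLK, e.g.,
/// "all memory", "no memory", or "memory:stack,argument" (illustrative; the
/// exact set of location names follows the checks below).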
6436 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6437     AAMemoryLocation::MemoryLocationsKind MLK) {
6438   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6439     return "all memory";
6440   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6441     return "no memory";
6442   std::string S = "memory:";
6443   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6444     S += "stack,";
6445   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6446     S += "constant,";
6447   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6448     S += "internal global,";
6449   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6450     S += "external global,";
6451   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6452     S += "argument,";
6453   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6454     S += "inaccessible,";
6455   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6456     S += "malloced,";
6457   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6458     S += "unknown,";
6459   S.pop_back();
6460   return S;
6461 }
6462 
6463 namespace {
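/// Base implementation shared by all memory location attributes.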
6464 struct AAMemoryLocationImpl : public AAMemoryLocation {
6465 
6466   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6467       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6468     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6469       AccessKind2Accesses[u] = nullptr;
6470   }
6471 
6472   ~AAMemoryLocationImpl() {
6473     // The AccessSets are allocated via a BumpPtrAllocator, we call
6474     // the destructor manually.
6475     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6476       if (AccessKind2Accesses[u])
6477         AccessKind2Accesses[u]->~AccessSet();
6478   }
6479 
6480   /// See AbstractAttribute::initialize(...).
6481   void initialize(Attributor &A) override {
6482     intersectAssumedBits(BEST_STATE);
6483     getKnownStateFromValue(A, getIRPosition(), getState());
6484     AAMemoryLocation::initialize(A);
6485   }
6486 
6487   /// Return the memory behavior information encoded in the IR for \p IRP.
6488   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6489                                      BitIntegerState &State,
6490                                      bool IgnoreSubsumingPositions = false) {
6491     // For internal functions we ignore `argmemonly` and
6492     // `inaccessiblemem_or_argmemonly` as we might break them via interprocedural
6493     // constant propagation. It is unclear if this is the best way but it is
6494     // unlikely this will cause real performance problems. If we are deriving
6495     // attributes for the anchor function we even remove the attribute in
6496     // addition to ignoring it.
6497     bool UseArgMemOnly = true;
6498     Function *AnchorFn = IRP.getAnchorScope();
6499     if (AnchorFn && A.isRunOn(*AnchorFn))
6500       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6501 
6502     SmallVector<Attribute, 2> Attrs;
6503     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6504     for (const Attribute &Attr : Attrs) {
6505       switch (Attr.getKindAsEnum()) {
6506       case Attribute::ReadNone:
6507         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6508         break;
6509       case Attribute::InaccessibleMemOnly:
6510         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6511         break;
6512       case Attribute::ArgMemOnly:
6513         if (UseArgMemOnly)
6514           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6515         else
6516           IRP.removeAttrs({Attribute::ArgMemOnly});
6517         break;
6518       case Attribute::InaccessibleMemOrArgMemOnly:
6519         if (UseArgMemOnly)
6520           State.addKnownBits(inverseLocation(
6521               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6522         else
6523           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6524         break;
6525       default:
6526         llvm_unreachable("Unexpected attribute!");
6527       }
6528     }
6529   }
6530 
6531   /// See AbstractAttribute::getDeducedAttributes(...).
6532   void getDeducedAttributes(LLVMContext &Ctx,
6533                             SmallVectorImpl<Attribute> &Attrs) const override {
6534     assert(Attrs.size() == 0);
6535     if (isAssumedReadNone()) {
6536       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6537     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6538       if (isAssumedInaccessibleMemOnly())
6539         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6540       else if (isAssumedArgMemOnly())
6541         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6542       else if (isAssumedInaccessibleOrArgMemOnly())
6543         Attrs.push_back(
6544             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6545     }
6546     assert(Attrs.size() <= 1);
6547   }
6548 
6549   /// See AbstractAttribute::manifest(...).
6550   ChangeStatus manifest(Attributor &A) override {
6551     const IRPosition &IRP = getIRPosition();
6552 
6553     // Check if we would improve the existing attributes first.
6554     SmallVector<Attribute, 4> DeducedAttrs;
6555     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6556     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6557           return IRP.hasAttr(Attr.getKindAsEnum(),
6558                              /* IgnoreSubsumingPositions */ true);
6559         }))
6560       return ChangeStatus::UNCHANGED;
6561 
6562     // Clear existing attributes.
6563     IRP.removeAttrs(AttrKinds);
6564     if (isAssumedReadNone())
6565       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6566 
6567     // Use the generic manifest method.
6568     return IRAttribute::manifest(A);
6569   }
6570 
6571   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6572   bool checkForAllAccessesToMemoryKind(
6573       function_ref<bool(const Instruction *, const Value *, AccessKind,
6574                         MemoryLocationsKind)>
6575           Pred,
6576       MemoryLocationsKind RequestedMLK) const override {
6577     if (!isValidState())
6578       return false;
6579 
6580     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6581     if (AssumedMLK == NO_LOCATIONS)
6582       return true;
6583 
6584     unsigned Idx = 0;
6585     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6586          CurMLK *= 2, ++Idx) {
6587       if (CurMLK & RequestedMLK)
6588         continue;
6589 
6590       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6591         for (const AccessInfo &AI : *Accesses)
6592           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6593             return false;
6594     }
6595 
6596     return true;
6597   }
6598 
6599   ChangeStatus indicatePessimisticFixpoint() override {
6600     // If we give up and indicate a pessimistic fixpoint this instruction will
6601     // become an access for all potential access kinds:
6602     // TODO: Add pointers for argmemonly and globals to improve the results of
6603     //       checkForAllAccessesToMemoryKind.
6604     bool Changed = false;
6605     MemoryLocationsKind KnownMLK = getKnown();
6606     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6607     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6608       if (!(CurMLK & KnownMLK))
6609         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6610                                   getAccessKindFromInst(I));
6611     return AAMemoryLocation::indicatePessimisticFixpoint();
6612   }
6613 
6614 protected:
6615   /// Helper struct to tie together an instruction that has a read or write
6616   /// effect with the pointer it accesses (if any).
6617   struct AccessInfo {
6618 
6619     /// The instruction that caused the access.
6620     const Instruction *I;
6621 
6622     /// The base pointer that is accessed, or null if unknown.
6623     const Value *Ptr;
6624 
6625     /// The kind of access (read/write/read+write).
6626     AccessKind Kind;
6627 
6628     bool operator==(const AccessInfo &RHS) const {
6629       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6630     }
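    /// Strict weak ordering over accesses; this lets AccessInfo act as the
    /// comparator of the AccessSet defined below.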
6631     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6632       if (LHS.I != RHS.I)
6633         return LHS.I < RHS.I;
6634       if (LHS.Ptr != RHS.Ptr)
6635         return LHS.Ptr < RHS.Ptr;
6636       if (LHS.Kind != RHS.Kind)
6637         return LHS.Kind < RHS.Kind;
6638       return false;
6639     }
6640   };
6641 
6642   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6643   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6644   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6645   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6646 
6647   /// Categorize the pointer arguments of \p CB that might access memory in
6648   /// \p AccessedLocs and update the state and access map accordingly.
6649   void
6650   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6651                                      AAMemoryLocation::StateType &AccessedLocs,
6652                                      bool &Changed);
6653 
6654   /// Return the kind(s) of location that may be accessed by \p V.
6655   AAMemoryLocation::MemoryLocationsKind
6656   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6657 
  /// Return the access kind as determined by \p I, or READ_WRITE if \p I is
  /// null.
6659   AccessKind getAccessKindFromInst(const Instruction *I) {
6660     AccessKind AK = READ_WRITE;
6661     if (I) {
6662       AK = I->mayReadFromMemory() ? READ : NONE;
6663       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6664     }
6665     return AK;
6666   }
6667 
6668   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6669   /// an access of kind \p AK to a \p MLK memory location with the access
6670   /// pointer \p Ptr.
6671   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6672                                  MemoryLocationsKind MLK, const Instruction *I,
6673                                  const Value *Ptr, bool &Changed,
6674                                  AccessKind AK = READ_WRITE) {
6675 
6676     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6677     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6678     if (!Accesses)
6679       Accesses = new (Allocator) AccessSet();
6680     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6681     State.removeAssumedBits(MLK);
6682   }
6683 
  /// Determine the underlying location kinds for \p Ptr, e.g., globals or
6685   /// arguments, and update the state and access map accordingly.
6686   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6687                           AAMemoryLocation::StateType &State, bool &Changed);
6688 
6689   /// Used to allocate access sets.
6690   BumpPtrAllocator &Allocator;
6691 
6692   /// The set of IR attributes AAMemoryLocation deals with.
6693   static const Attribute::AttrKind AttrKinds[4];
6694 };
6695 
6696 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6697     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6698     Attribute::InaccessibleMemOrArgMemOnly};
6699 
6700 void AAMemoryLocationImpl::categorizePtrValue(
6701     Attributor &A, const Instruction &I, const Value &Ptr,
6702     AAMemoryLocation::StateType &State, bool &Changed) {
6703   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6704                     << Ptr << " ["
6705                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
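  // The callback below strips (chains of) GEP operators so that only the
  // underlying base pointer is categorized by the value visitor.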
6706 
6707   auto StripGEPCB = [](Value *V) -> Value * {
6708     auto *GEP = dyn_cast<GEPOperator>(V);
6709     while (GEP) {
6710       V = GEP->getPointerOperand();
6711       GEP = dyn_cast<GEPOperator>(V);
6712     }
6713     return V;
6714   };
6715 
6716   auto VisitValueCB = [&](Value &V, const Instruction *,
6717                           AAMemoryLocation::StateType &T,
6718                           bool Stripped) -> bool {
6719     // TODO: recognize the TBAA used for constant accesses.
6720     MemoryLocationsKind MLK = NO_LOCATIONS;
6721     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6722     if (isa<UndefValue>(V))
6723       return true;
6724     if (auto *Arg = dyn_cast<Argument>(&V)) {
6725       if (Arg->hasByValAttr())
6726         MLK = NO_LOCAL_MEM;
6727       else
6728         MLK = NO_ARGUMENT_MEM;
6729     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attribute pass, so we do not treat it as one either. Memory
      // known to be constant via TBAA is similar. (We know we do not write it
      // because it is constant.)
6733       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6734         if (GVar->isConstant())
6735           return true;
6736 
6737       if (GV->hasLocalLinkage())
6738         MLK = NO_GLOBAL_INTERNAL_MEM;
6739       else
6740         MLK = NO_GLOBAL_EXTERNAL_MEM;
6741     } else if (isa<ConstantPointerNull>(V) &&
6742                !NullPointerIsDefined(getAssociatedFunction(),
6743                                      V.getType()->getPointerAddressSpace())) {
6744       return true;
6745     } else if (isa<AllocaInst>(V)) {
6746       MLK = NO_LOCAL_MEM;
6747     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6748       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6749           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6750       if (NoAliasAA.isAssumedNoAlias())
6751         MLK = NO_MALLOCED_MEM;
6752       else
6753         MLK = NO_UNKOWN_MEM;
6754     } else {
6755       MLK = NO_UNKOWN_MEM;
6756     }
6757 
6758     assert(MLK != NO_LOCATIONS && "No location specified!");
6759     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6760                               getAccessKindFromInst(&I));
6761     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6762                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6763                       << "\n");
6764     return true;
6765   };
6766 
6767   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6768           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6769           /* UseValueSimplify */ true,
6770           /* MaxValues */ 32, StripGEPCB)) {
6771     LLVM_DEBUG(
6772         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6773     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6774                               getAccessKindFromInst(&I));
6775   } else {
6776     LLVM_DEBUG(
6777         dbgs()
6778         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6779         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6780   }
6781 }
6782 
6783 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6784     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6785     bool &Changed) {
6786   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6787 
6788     // Skip non-pointer arguments.
6789     const Value *ArgOp = CB.getArgOperand(ArgNo);
6790     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6791       continue;
6792 
6793     // Skip readnone arguments.
6794     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6795     const auto &ArgOpMemLocationAA =
6796         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6797 
6798     if (ArgOpMemLocationAA.isAssumedReadNone())
6799       continue;
6800 
6801     // Categorize potentially accessed pointer arguments as if there was an
6802     // access instruction with them as pointer.
6803     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6804   }
6805 }
6806 
6807 AAMemoryLocation::MemoryLocationsKind
6808 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6809                                                   bool &Changed) {
6810   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6811                     << I << "\n");
6812 
6813   AAMemoryLocation::StateType AccessedLocs;
6814   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6815 
6816   if (auto *CB = dyn_cast<CallBase>(&I)) {
6817 
    // First check if we assume the call does not access any memory at all.
6819     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6820         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6821     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6822                       << " [" << CBMemLocationAA << "]\n");
6823 
6824     if (CBMemLocationAA.isAssumedReadNone())
6825       return NO_LOCATIONS;
6826 
6827     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6828       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6829                                 Changed, getAccessKindFromInst(&I));
6830       return AccessedLocs.getAssumed();
6831     }
6832 
6833     uint32_t CBAssumedNotAccessedLocs =
6834         CBMemLocationAA.getAssumedNotAccessedLocation();
6835 
    // Set the argmemonly and global bits as we handle them separately below.
6837     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6838         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6839 
6840     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6841       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6842         continue;
6843       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6844                                 getAccessKindFromInst(&I));
6845     }
6846 
6847     // Now handle global memory if it might be accessed. This is slightly tricky
6848     // as NO_GLOBAL_MEM has multiple bits set.
6849     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6850     if (HasGlobalAccesses) {
6851       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6852                             AccessKind Kind, MemoryLocationsKind MLK) {
6853         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6854                                   getAccessKindFromInst(&I));
6855         return true;
6856       };
6857       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6858               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6859         return AccessedLocs.getWorstState();
6860     }
6861 
6862     LLVM_DEBUG(
6863         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6864                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6865 
6866     // Now handle argument memory if it might be accessed.
6867     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6868     if (HasArgAccesses)
6869       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6870 
6871     LLVM_DEBUG(
6872         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6873                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6874 
6875     return AccessedLocs.getAssumed();
6876   }
6877 
6878   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6879     LLVM_DEBUG(
6880         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6881                << I << " [" << *Ptr << "]\n");
6882     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6883     return AccessedLocs.getAssumed();
6884   }
6885 
6886   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6887                     << I << "\n");
6888   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6889                             getAccessKindFromInst(&I));
6890   return AccessedLocs.getAssumed();
6891 }
6892 
6893 /// An AA to represent the memory behavior function attributes.
6894 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6895   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6896       : AAMemoryLocationImpl(IRP, A) {}
6897 
6898   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
6900 
6901     const auto &MemBehaviorAA =
6902         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6903     if (MemBehaviorAA.isAssumedReadNone()) {
6904       if (MemBehaviorAA.isKnownReadNone())
6905         return indicateOptimisticFixpoint();
6906       assert(isAssumedReadNone() &&
6907              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6908       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6909       return ChangeStatus::UNCHANGED;
6910     }
6911 
6912     // The current assumed state used to determine a change.
6913     auto AssumedState = getAssumed();
6914     bool Changed = false;
6915 
6916     auto CheckRWInst = [&](Instruction &I) {
6917       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6918       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6919                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6920       removeAssumedBits(inverseLocation(MLK, false, false));
      // Stop once only the valid bit is set in the *not assumed location*
      // state, i.e., once we do not actually exclude any memory locations
      // anymore.
6923       return getAssumedNotAccessedLocation() != VALID_STATE;
6924     };
6925 
6926     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6927       return indicatePessimisticFixpoint();
6928 
6929     Changed |= AssumedState != getAssumed();
6930     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6931   }
6932 
6933   /// See AbstractAttribute::trackStatistics()
6934   void trackStatistics() const override {
6935     if (isAssumedReadNone())
6936       STATS_DECLTRACK_FN_ATTR(readnone)
6937     else if (isAssumedArgMemOnly())
6938       STATS_DECLTRACK_FN_ATTR(argmemonly)
6939     else if (isAssumedInaccessibleMemOnly())
6940       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6941     else if (isAssumedInaccessibleOrArgMemOnly())
6942       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6943   }
6944 };
6945 
6946 /// AAMemoryLocation attribute for call sites.
6947 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6948   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6949       : AAMemoryLocationImpl(IRP, A) {}
6950 
6951   /// See AbstractAttribute::initialize(...).
6952   void initialize(Attributor &A) override {
6953     AAMemoryLocationImpl::initialize(A);
6954     Function *F = getAssociatedFunction();
6955     if (!F || F->isDeclaration())
6956       indicatePessimisticFixpoint();
6957   }
6958 
6959   /// See AbstractAttribute::updateImpl(...).
6960   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6965     Function *F = getAssociatedFunction();
6966     const IRPosition &FnPos = IRPosition::function(*F);
6967     auto &FnAA =
6968         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
6969     bool Changed = false;
6970     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6971                           AccessKind Kind, MemoryLocationsKind MLK) {
6972       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6973                                 getAccessKindFromInst(I));
6974       return true;
6975     };
6976     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6977       return indicatePessimisticFixpoint();
6978     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6979   }
6980 
6981   /// See AbstractAttribute::trackStatistics()
6982   void trackStatistics() const override {
6983     if (isAssumedReadNone())
6984       STATS_DECLTRACK_CS_ATTR(readnone)
6985   }
6986 };
6987 
6988 /// ------------------ Value Constant Range Attribute -------------------------
6989 
6990 struct AAValueConstantRangeImpl : AAValueConstantRange {
6991   using StateType = IntegerRangeState;
6992   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6993       : AAValueConstantRange(IRP, A) {}
6994 
6995   /// See AbstractAttribute::getAsStr().
6996   const std::string getAsStr() const override {
6997     std::string Str;
6998     llvm::raw_string_ostream OS(Str);
6999     OS << "range(" << getBitWidth() << ")<";
7000     getKnown().print(OS);
7001     OS << " / ";
7002     getAssumed().print(OS);
7003     OS << ">";
7004     return OS.str();
7005   }
7006 
7007   /// Helper function to get a SCEV expr for the associated value at program
7008   /// point \p I.
7009   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7010     if (!getAnchorScope())
7011       return nullptr;
7012 
7013     ScalarEvolution *SE =
7014         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7015             *getAnchorScope());
7016 
7017     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7018         *getAnchorScope());
7019 
7020     if (!SE || !LI)
7021       return nullptr;
7022 
7023     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7024     if (!I)
7025       return S;
7026 
7027     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7028   }
7029 
7030   /// Helper function to get a range from SCEV for the associated value at
7031   /// program point \p I.
7032   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7033                                          const Instruction *I = nullptr) const {
7034     if (!getAnchorScope())
7035       return getWorstState(getBitWidth());
7036 
7037     ScalarEvolution *SE =
7038         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7039             *getAnchorScope());
7040 
7041     const SCEV *S = getSCEV(A, I);
7042     if (!SE || !S)
7043       return getWorstState(getBitWidth());
7044 
7045     return SE->getUnsignedRange(S);
7046   }
7047 
7048   /// Helper function to get a range from LVI for the associated value at
7049   /// program point \p I.
7050   ConstantRange
7051   getConstantRangeFromLVI(Attributor &A,
7052                           const Instruction *CtxI = nullptr) const {
7053     if (!getAnchorScope())
7054       return getWorstState(getBitWidth());
7055 
7056     LazyValueInfo *LVI =
7057         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7058             *getAnchorScope());
7059 
7060     if (!LVI || !CtxI)
7061       return getWorstState(getBitWidth());
7062     return LVI->getConstantRange(&getAssociatedValue(),
7063                                  const_cast<Instruction *>(CtxI));
7064   }
7065 
7066   /// See AAValueConstantRange::getKnownConstantRange(..).
7067   ConstantRange
7068   getKnownConstantRange(Attributor &A,
7069                         const Instruction *CtxI = nullptr) const override {
7070     if (!CtxI || CtxI == getCtxI())
7071       return getKnown();
7072 
7073     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7074     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7075     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7076   }
7077 
7078   /// See AAValueConstantRange::getAssumedConstantRange(..).
7079   ConstantRange
7080   getAssumedConstantRange(Attributor &A,
7081                           const Instruction *CtxI = nullptr) const override {
7082     // TODO: Make SCEV use Attributor assumption.
7083     //       We may be able to bound a variable range via assumptions in
7084     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
7085     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7086 
7087     if (!CtxI || CtxI == getCtxI())
7088       return getAssumed();
7089 
7090     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7091     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7092     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7093   }
7094 
7095   /// See AbstractAttribute::initialize(..).
7096   void initialize(Attributor &A) override {
7097     // Intersect a range given by SCEV.
7098     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7099 
7100     // Intersect a range given by LVI.
7101     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7102   }
7103 
7104   /// Helper function to create MDNode for range metadata.
7105   static MDNode *
7106   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7107                             const ConstantRange &AssumedConstantRange) {
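    // Range metadata encodes a half-open interval [Lower, Upper).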
7108     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7109                                   Ty, AssumedConstantRange.getLower())),
7110                               ConstantAsMetadata::get(ConstantInt::get(
7111                                   Ty, AssumedConstantRange.getUpper()))};
7112     return MDNode::get(Ctx, LowAndHigh);
7113   }
7114 
  /// Return true if \p Assumed is a strictly better (i.e., smaller) range than
  /// the one described by \p KnownRanges, or if there is no known range yet.
7116   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7117 
7118     if (Assumed.isFullSet())
7119       return false;
7120 
7121     if (!KnownRanges)
7122       return true;
7123 
    // If multiple ranges are annotated in IR, we give up annotating the
    // assumed range for now.

    // TODO: If there exists a known range which contains the assumed range,
    //       we can say the assumed range is better.
7129     if (KnownRanges->getNumOperands() > 2)
7130       return false;
7131 
7132     ConstantInt *Lower =
7133         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7134     ConstantInt *Upper =
7135         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7136 
7137     ConstantRange Known(Lower->getValue(), Upper->getValue());
7138     return Known.contains(Assumed) && Known != Assumed;
7139   }
7140 
7141   /// Helper function to set range metadata.
7142   static bool
7143   setRangeMetadataIfisBetterRange(Instruction *I,
7144                                   const ConstantRange &AssumedConstantRange) {
7145     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7146     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7147       if (!AssumedConstantRange.isEmptySet()) {
7148         I->setMetadata(LLVMContext::MD_range,
7149                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7150                                                  AssumedConstantRange));
7151         return true;
7152       }
7153     }
7154     return false;
7155   }
7156 
7157   /// See AbstractAttribute::manifest()
7158   ChangeStatus manifest(Attributor &A) override {
7159     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7160     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7161     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7162 
7163     auto &V = getAssociatedValue();
7164     if (!AssumedConstantRange.isEmptySet() &&
7165         !AssumedConstantRange.isSingleElement()) {
7166       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7167         assert(I == getCtxI() && "Should not annotate an instruction which is "
7168                                  "not the context instruction");
7169         if (isa<CallInst>(I) || isa<LoadInst>(I))
7170           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7171             Changed = ChangeStatus::CHANGED;
7172       }
7173     }
7174 
7175     return Changed;
7176   }
7177 };
7178 
7179 struct AAValueConstantRangeArgument final
7180     : AAArgumentFromCallSiteArguments<
7181           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7182           true /* BridgeCallBaseContext */> {
7183   using Base = AAArgumentFromCallSiteArguments<
7184       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7185       true /* BridgeCallBaseContext */>;
7186   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7187       : Base(IRP, A) {}
7188 
7189   /// See AbstractAttribute::initialize(..).
7190   void initialize(Attributor &A) override {
7191     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7192       indicatePessimisticFixpoint();
7193     } else {
7194       Base::initialize(A);
7195     }
7196   }
7197 
7198   /// See AbstractAttribute::trackStatistics()
7199   void trackStatistics() const override {
7200     STATS_DECLTRACK_ARG_ATTR(value_range)
7201   }
7202 };
7203 
7204 struct AAValueConstantRangeReturned
7205     : AAReturnedFromReturnedValues<AAValueConstantRange,
7206                                    AAValueConstantRangeImpl,
7207                                    AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true> {
7209   using Base =
7210       AAReturnedFromReturnedValues<AAValueConstantRange,
7211                                    AAValueConstantRangeImpl,
7212                                    AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true>;
7214   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7215       : Base(IRP, A) {}
7216 
7217   /// See AbstractAttribute::initialize(...).
7218   void initialize(Attributor &A) override {}
7219 
7220   /// See AbstractAttribute::trackStatistics()
7221   void trackStatistics() const override {
7222     STATS_DECLTRACK_FNRET_ATTR(value_range)
7223   }
7224 };
7225 
7226 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7227   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7228       : AAValueConstantRangeImpl(IRP, A) {}
7229 
7230   /// See AbstractAttribute::initialize(...).
7231   void initialize(Attributor &A) override {
7232     AAValueConstantRangeImpl::initialize(A);
7233     Value &V = getAssociatedValue();
7234 
7235     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7236       unionAssumed(ConstantRange(C->getValue()));
7237       indicateOptimisticFixpoint();
7238       return;
7239     }
7240 
7241     if (isa<UndefValue>(&V)) {
7242       // Collapse the undef state to 0.
7243       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7244       indicateOptimisticFixpoint();
7245       return;
7246     }
7247 
7248     if (isa<CallBase>(&V))
7249       return;
7250 
7251     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7252       return;
7253     // If it is a load instruction with range metadata, use it.
7254     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7255       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7256         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7257         return;
7258       }
7259 
    // We can work with PHI and select instructions as we traverse their
    // operands during the update.
7262     if (isa<SelectInst>(V) || isa<PHINode>(V))
7263       return;
7264 
7265     // Otherwise we give up.
7266     indicatePessimisticFixpoint();
7267 
7268     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7269                       << getAssociatedValue() << "\n");
7270   }
7271 
7272   bool calculateBinaryOperator(
7273       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7274       const Instruction *CtxI,
7275       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7276     Value *LHS = BinOp->getOperand(0);
7277     Value *RHS = BinOp->getOperand(1);
7278     // TODO: Allow non integers as well.
7279     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7280       return false;
7281 
7282     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7283         *this, IRPosition::value(*LHS, getCallBaseContext()),
7284         DepClassTy::REQUIRED);
7285     QuerriedAAs.push_back(&LHSAA);
7286     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7287 
7288     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7289         *this, IRPosition::value(*RHS, getCallBaseContext()),
7290         DepClassTy::REQUIRED);
7291     QuerriedAAs.push_back(&RHSAA);
7292     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7293 
7294     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7295 
7296     T.unionAssumed(AssumedRange);
7297 
7298     // TODO: Track a known state too.
7299 
7300     return T.isValidState();
7301   }
7302 
7303   bool calculateCastInst(
7304       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7305       const Instruction *CtxI,
7306       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7307     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7308     // TODO: Allow non integers as well.
7309     Value &OpV = *CastI->getOperand(0);
7310     if (!OpV.getType()->isIntegerTy())
7311       return false;
7312 
7313     auto &OpAA = A.getAAFor<AAValueConstantRange>(
7314         *this, IRPosition::value(OpV, getCallBaseContext()),
7315         DepClassTy::REQUIRED);
7316     QuerriedAAs.push_back(&OpAA);
7317     T.unionAssumed(
7318         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7319     return T.isValidState();
7320   }
7321 
7322   bool
7323   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7324                    const Instruction *CtxI,
7325                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7326     Value *LHS = CmpI->getOperand(0);
7327     Value *RHS = CmpI->getOperand(1);
7328     // TODO: Allow non integers as well.
7329     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7330       return false;
7331 
7332     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7333         *this, IRPosition::value(*LHS, getCallBaseContext()),
7334         DepClassTy::REQUIRED);
7335     QuerriedAAs.push_back(&LHSAA);
    auto &RHSAA = A.getAAFor<AAValueConstantRange>(
        *this, IRPosition::value(*RHS, getCallBaseContext()),
        DepClassTy::REQUIRED);
    QuerriedAAs.push_back(&RHSAA);
    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7340     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7341 
7342     // If one of them is empty set, we can't decide.
7343     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7344       return true;
7345 
7346     bool MustTrue = false, MustFalse = false;
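    // Below, makeAllowedICmpRegion yields the LHS values for which the
    // predicate *may* hold for some value in RHSAARange, while
    // makeSatisfyingICmpRegion yields those for which it *must* hold for all
    // values in RHSAARange.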
7347 
7348     auto AllowedRegion =
7349         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7350 
7351     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
7352         CmpI->getPredicate(), RHSAARange);
7353 
7354     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7355       MustFalse = true;
7356 
7357     if (SatisfyingRegion.contains(LHSAARange))
7358       MustTrue = true;
7359 
7360     assert((!MustTrue || !MustFalse) &&
7361            "Either MustTrue or MustFalse should be false!");
7362 
7363     if (MustTrue)
7364       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7365     else if (MustFalse)
7366       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7367     else
7368       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7369 
7370     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7371                       << " " << RHSAA << "\n");
7372 
7373     // TODO: Track a known state too.
7374     return T.isValidState();
7375   }
7376 
7377   /// See AbstractAttribute::updateImpl(...).
7378   ChangeStatus updateImpl(Attributor &A) override {
7379     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7380                             IntegerRangeState &T, bool Stripped) -> bool {
7381       Instruction *I = dyn_cast<Instruction>(&V);
7382       if (!I || isa<CallBase>(I)) {
7383 
        // If the value is not an instruction (or is a call), we query the
        // Attributor for the corresponding AA of the value.
7385         const auto &AA = A.getAAFor<AAValueConstantRange>(
7386             *this, IRPosition::value(V), DepClassTy::REQUIRED);
7387 
        // The clamp operator is not used here so that the program point CtxI
        // can be taken into account.
7389         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7390 
7391         return T.isValidState();
7392       }
7393 
7394       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7395       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7396         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7397           return false;
7398       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7399         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7400           return false;
7401       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7402         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7403           return false;
7404       } else {
7405         // Give up with other instructions.
7406         // TODO: Add other instructions
7407 
7408         T.indicatePessimisticFixpoint();
7409         return false;
7410       }
7411 
7412       // Catch circular reasoning in a pessimistic way for now.
7413       // TODO: Check how the range evolves and if we stripped anything, see also
7414       //       AADereferenceable or AAAlign for similar situations.
7415       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7416         if (QueriedAA != this)
7417           continue;
        // If we are in a steady state we do not need to worry.
7419         if (T.getAssumed() == getState().getAssumed())
7420           continue;
7421         T.indicatePessimisticFixpoint();
7422       }
7423 
7424       return T.isValidState();
7425     };
7426 
7427     IntegerRangeState T(getBitWidth());
7428 
7429     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7430             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7431             /* UseValueSimplify */ false))
7432       return indicatePessimisticFixpoint();
7433 
7434     return clampStateAndIndicateChange(getState(), T);
7435   }
7436 
7437   /// See AbstractAttribute::trackStatistics()
7438   void trackStatistics() const override {
7439     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7440   }
7441 };
7442 
7443 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7444   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7445       : AAValueConstantRangeImpl(IRP, A) {}
7446 
  /// See AbstractAttribute::updateImpl(...).
7448   ChangeStatus updateImpl(Attributor &A) override {
7449     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7450                      "not be called");
7451   }
7452 
7453   /// See AbstractAttribute::trackStatistics()
7454   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7455 };
7456 
7457 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7458   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7459       : AAValueConstantRangeFunction(IRP, A) {}
7460 
7461   /// See AbstractAttribute::trackStatistics()
7462   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7463 };
7464 
7465 struct AAValueConstantRangeCallSiteReturned
7466     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7467                                      AAValueConstantRangeImpl,
7468                                      AAValueConstantRangeImpl::StateType,
7469                                      /* IntroduceCallBaseContext */ true> {
7470   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7471       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7472                                        AAValueConstantRangeImpl,
7473                                        AAValueConstantRangeImpl::StateType,
7474                                        /* IntroduceCallBaseContext */ true>(IRP,
7475                                                                             A) {
7476   }
7477 
7478   /// See AbstractAttribute::initialize(...).
7479   void initialize(Attributor &A) override {
    // If it is a call instruction with range metadata, use the metadata.
7481     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7482       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7483         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7484 
7485     AAValueConstantRangeImpl::initialize(A);
7486   }
7487 
7488   /// See AbstractAttribute::trackStatistics()
7489   void trackStatistics() const override {
7490     STATS_DECLTRACK_CSRET_ATTR(value_range)
7491   }
7492 };
7493 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7494   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7495       : AAValueConstantRangeFloating(IRP, A) {}
7496 
7497   /// See AbstractAttribute::manifest()
7498   ChangeStatus manifest(Attributor &A) override {
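    // There is no way to attach range information to a call site argument
    // operand in the IR, so nothing is manifested here.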
7499     return ChangeStatus::UNCHANGED;
7500   }
7501 
7502   /// See AbstractAttribute::trackStatistics()
7503   void trackStatistics() const override {
7504     STATS_DECLTRACK_CSARG_ATTR(value_range)
7505   }
7506 };
7507 
7508 /// ------------------ Potential Values Attribute -------------------------
7509 
7510 struct AAPotentialValuesImpl : AAPotentialValues {
7511   using StateType = PotentialConstantIntValuesState;
7512 
7513   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7514       : AAPotentialValues(IRP, A) {}
7515 
7516   /// See AbstractAttribute::getAsStr().
7517   const std::string getAsStr() const override {
7518     std::string Str;
7519     llvm::raw_string_ostream OS(Str);
7520     OS << getState();
7521     return OS.str();
7522   }
7523 
7524   /// See AbstractAttribute::updateImpl(...).
7525   ChangeStatus updateImpl(Attributor &A) override {
7526     return indicatePessimisticFixpoint();
7527   }
7528 };
7529 
7530 struct AAPotentialValuesArgument final
7531     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7532                                       PotentialConstantIntValuesState> {
7533   using Base =
7534       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7535                                       PotentialConstantIntValuesState>;
7536   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7537       : Base(IRP, A) {}
7538 
7539   /// See AbstractAttribute::initialize(..).
7540   void initialize(Attributor &A) override {
7541     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7542       indicatePessimisticFixpoint();
7543     } else {
7544       Base::initialize(A);
7545     }
7546   }
7547 
7548   /// See AbstractAttribute::trackStatistics()
7549   void trackStatistics() const override {
7550     STATS_DECLTRACK_ARG_ATTR(potential_values)
7551   }
7552 };
7553 
7554 struct AAPotentialValuesReturned
7555     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7556   using Base =
7557       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7558   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7559       : Base(IRP, A) {}
7560 
7561   /// See AbstractAttribute::trackStatistics()
7562   void trackStatistics() const override {
7563     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7564   }
7565 };
7566 
7567 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7568   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7569       : AAPotentialValuesImpl(IRP, A) {}
7570 
7571   /// See AbstractAttribute::initialize(..).
7572   void initialize(Attributor &A) override {
7573     Value &V = getAssociatedValue();
7574 
7575     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7576       unionAssumed(C->getValue());
7577       indicateOptimisticFixpoint();
7578       return;
7579     }
7580 
7581     if (isa<UndefValue>(&V)) {
7582       unionAssumedWithUndef();
7583       indicateOptimisticFixpoint();
7584       return;
7585     }
7586 
7587     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7588       return;
7589 
7590     if (isa<SelectInst>(V) || isa<PHINode>(V))
7591       return;
7592 
7593     indicatePessimisticFixpoint();
7594 
7595     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7596                       << getAssociatedValue() << "\n");
7597   }
7598 
7599   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7600                                 const APInt &RHS) {
7601     ICmpInst::Predicate Pred = ICI->getPredicate();
7602     switch (Pred) {
7603     case ICmpInst::ICMP_UGT:
7604       return LHS.ugt(RHS);
7605     case ICmpInst::ICMP_SGT:
7606       return LHS.sgt(RHS);
7607     case ICmpInst::ICMP_EQ:
7608       return LHS.eq(RHS);
7609     case ICmpInst::ICMP_UGE:
7610       return LHS.uge(RHS);
7611     case ICmpInst::ICMP_SGE:
7612       return LHS.sge(RHS);
7613     case ICmpInst::ICMP_ULT:
7614       return LHS.ult(RHS);
7615     case ICmpInst::ICMP_SLT:
7616       return LHS.slt(RHS);
7617     case ICmpInst::ICMP_NE:
7618       return LHS.ne(RHS);
7619     case ICmpInst::ICMP_ULE:
7620       return LHS.ule(RHS);
7621     case ICmpInst::ICMP_SLE:
7622       return LHS.sle(RHS);
7623     default:
7624       llvm_unreachable("Invalid ICmp predicate!");
7625     }
7626   }
7627 
7628   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7629                                  uint32_t ResultBitWidth) {
7630     Instruction::CastOps CastOp = CI->getOpcode();
7631     switch (CastOp) {
7632     default:
7633       llvm_unreachable("unsupported or not integer cast");
7634     case Instruction::Trunc:
7635       return Src.trunc(ResultBitWidth);
7636     case Instruction::SExt:
7637       return Src.sext(ResultBitWidth);
7638     case Instruction::ZExt:
7639       return Src.zext(ResultBitWidth);
7640     case Instruction::BitCast:
7641       return Src;
7642     }
7643   }
7644 
7645   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7646                                        const APInt &LHS, const APInt &RHS,
7647                                        bool &SkipOperation, bool &Unsupported) {
7648     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
    // Unsupported is set to true when the binary operator is not supported.
    // SkipOperation is set to true when UB occurs with the given operand pair
    // (LHS, RHS).
7652     // TODO: we should look at nsw and nuw keywords to handle operations
7653     //       that create poison or undef value.
7654     switch (BinOpcode) {
7655     default:
7656       Unsupported = true;
7657       return LHS;
7658     case Instruction::Add:
7659       return LHS + RHS;
7660     case Instruction::Sub:
7661       return LHS - RHS;
7662     case Instruction::Mul:
7663       return LHS * RHS;
7664     case Instruction::UDiv:
7665       if (RHS.isNullValue()) {
7666         SkipOperation = true;
7667         return LHS;
7668       }
7669       return LHS.udiv(RHS);
7670     case Instruction::SDiv:
7671       if (RHS.isNullValue()) {
7672         SkipOperation = true;
7673         return LHS;
7674       }
7675       return LHS.sdiv(RHS);
7676     case Instruction::URem:
7677       if (RHS.isNullValue()) {
7678         SkipOperation = true;
7679         return LHS;
7680       }
7681       return LHS.urem(RHS);
7682     case Instruction::SRem:
7683       if (RHS.isNullValue()) {
7684         SkipOperation = true;
7685         return LHS;
7686       }
7687       return LHS.srem(RHS);
7688     case Instruction::Shl:
7689       return LHS.shl(RHS);
7690     case Instruction::LShr:
7691       return LHS.lshr(RHS);
7692     case Instruction::AShr:
7693       return LHS.ashr(RHS);
7694     case Instruction::And:
7695       return LHS & RHS;
7696     case Instruction::Or:
7697       return LHS | RHS;
7698     case Instruction::Xor:
7699       return LHS ^ RHS;
7700     }
7701   }
7702 
7703   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7704                                            const APInt &LHS, const APInt &RHS) {
7705     bool SkipOperation = false;
7706     bool Unsupported = false;
7707     APInt Result =
7708         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7709     if (Unsupported)
7710       return false;
7711     // If SkipOperation is true, we can ignore this operand pair (L, R).
7712     if (!SkipOperation)
7713       unionAssumed(Result);
7714     return isValidState();
7715   }
7716 
7717   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7718     auto AssumedBefore = getAssumed();
7719     Value *LHS = ICI->getOperand(0);
7720     Value *RHS = ICI->getOperand(1);
7721     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7722       return indicatePessimisticFixpoint();
7723 
7724     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7725                                                 DepClassTy::REQUIRED);
7726     if (!LHSAA.isValidState())
7727       return indicatePessimisticFixpoint();
7728 
7729     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7730                                                 DepClassTy::REQUIRED);
7731     if (!RHSAA.isValidState())
7732       return indicatePessimisticFixpoint();
7733 
7734     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7735     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7736 
7737     // TODO: make use of undef flag to limit potential values aggressively.
7738     bool MaybeTrue = false, MaybeFalse = false;
7739     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
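    // An operand that may be undef is modelled as the concrete value 0 below.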
7740     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7741       // The result of any comparison between undefs can be soundly replaced
7742       // with undef.
7743       unionAssumedWithUndef();
    } else if (LHSAA.undefIsContained()) {
7746       for (const APInt &R : RHSAAPVS) {
7747         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7748         MaybeTrue |= CmpResult;
7749         MaybeFalse |= !CmpResult;
7750         if (MaybeTrue & MaybeFalse)
7751           return indicatePessimisticFixpoint();
7752       }
7753     } else if (RHSAA.undefIsContained()) {
7754       for (const APInt &L : LHSAAPVS) {
7755         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7756         MaybeTrue |= CmpResult;
7757         MaybeFalse |= !CmpResult;
7758         if (MaybeTrue & MaybeFalse)
7759           return indicatePessimisticFixpoint();
7760       }
7761     } else {
7762       for (const APInt &L : LHSAAPVS) {
7763         for (const APInt &R : RHSAAPVS) {
7764           bool CmpResult = calculateICmpInst(ICI, L, R);
7765           MaybeTrue |= CmpResult;
7766           MaybeFalse |= !CmpResult;
7767           if (MaybeTrue & MaybeFalse)
7768             return indicatePessimisticFixpoint();
7769         }
7770       }
7771     }
7772     if (MaybeTrue)
7773       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7774     if (MaybeFalse)
7775       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7776     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7777                                          : ChangeStatus::CHANGED;
7778   }
7779 
7780   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7781     auto AssumedBefore = getAssumed();
7782     Value *LHS = SI->getTrueValue();
7783     Value *RHS = SI->getFalseValue();
7784     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7785       return indicatePessimisticFixpoint();
7786 
7787     // TODO: Use assumed simplified condition value
7788     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7789                                                 DepClassTy::REQUIRED);
7790     if (!LHSAA.isValidState())
7791       return indicatePessimisticFixpoint();
7792 
7793     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7794                                                 DepClassTy::REQUIRED);
7795     if (!RHSAA.isValidState())
7796       return indicatePessimisticFixpoint();
7797 
7798     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7799       // select i1 *, undef , undef => undef
7800       unionAssumedWithUndef();
7801     else {
7802       unionAssumed(LHSAA);
7803       unionAssumed(RHSAA);
7804     }
7805     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7806                                          : ChangeStatus::CHANGED;
7807   }
7808 
7809   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7810     auto AssumedBefore = getAssumed();
7811     if (!CI->isIntegerCast())
7812       return indicatePessimisticFixpoint();
7813     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7814     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7815     Value *Src = CI->getOperand(0);
7816     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7817                                                 DepClassTy::REQUIRED);
7818     if (!SrcAA.isValidState())
7819       return indicatePessimisticFixpoint();
7820     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7821     if (SrcAA.undefIsContained())
7822       unionAssumedWithUndef();
7823     else {
7824       for (const APInt &S : SrcAAPVS) {
7825         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7826         unionAssumed(T);
7827       }
7828     }
7829     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7830                                          : ChangeStatus::CHANGED;
7831   }
7832 
7833   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7834     auto AssumedBefore = getAssumed();
7835     Value *LHS = BinOp->getOperand(0);
7836     Value *RHS = BinOp->getOperand(1);
7837     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7838       return indicatePessimisticFixpoint();
7839 
7840     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7841                                                 DepClassTy::REQUIRED);
7842     if (!LHSAA.isValidState())
7843       return indicatePessimisticFixpoint();
7844 
7845     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7846                                                 DepClassTy::REQUIRED);
7847     if (!RHSAA.isValidState())
7848       return indicatePessimisticFixpoint();
7849 
7850     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7851     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7852     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7853 
7854     // TODO: make use of undef flag to limit potential values aggressively.
7855     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7856       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7857         return indicatePessimisticFixpoint();
7858     } else if (LHSAA.undefIsContained()) {
7859       for (const APInt &R : RHSAAPVS) {
7860         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7861           return indicatePessimisticFixpoint();
7862       }
7863     } else if (RHSAA.undefIsContained()) {
7864       for (const APInt &L : LHSAAPVS) {
7865         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7866           return indicatePessimisticFixpoint();
7867       }
7868     } else {
7869       for (const APInt &L : LHSAAPVS) {
7870         for (const APInt &R : RHSAAPVS) {
7871           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7872             return indicatePessimisticFixpoint();
7873         }
7874       }
7875     }
7876     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7877                                          : ChangeStatus::CHANGED;
7878   }
7879 
7880   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7881     auto AssumedBefore = getAssumed();
7882     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7883       Value *IncomingValue = PHI->getIncomingValue(u);
7884       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7885           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
7886       if (!PotentialValuesAA.isValidState())
7887         return indicatePessimisticFixpoint();
7888       if (PotentialValuesAA.undefIsContained())
7889         unionAssumedWithUndef();
7890       else
7891         unionAssumed(PotentialValuesAA.getAssumed());
7892     }
7893     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7894                                          : ChangeStatus::CHANGED;
7895   }
7896 
7897   /// See AbstractAttribute::updateImpl(...).
7898   ChangeStatus updateImpl(Attributor &A) override {
7899     Value &V = getAssociatedValue();
7900     Instruction *I = dyn_cast<Instruction>(&V);
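    // initialize(...) takes a fixpoint for all values that are not one of the
    // instruction kinds handled below, so I is expected to be non-null here.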
7901 
7902     if (auto *ICI = dyn_cast<ICmpInst>(I))
7903       return updateWithICmpInst(A, ICI);
7904 
7905     if (auto *SI = dyn_cast<SelectInst>(I))
7906       return updateWithSelectInst(A, SI);
7907 
7908     if (auto *CI = dyn_cast<CastInst>(I))
7909       return updateWithCastInst(A, CI);
7910 
7911     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7912       return updateWithBinaryOperator(A, BinOp);
7913 
7914     if (auto *PHI = dyn_cast<PHINode>(I))
7915       return updateWithPHINode(A, PHI);
7916 
7917     return indicatePessimisticFixpoint();
7918   }
7919 
7920   /// See AbstractAttribute::trackStatistics()
7921   void trackStatistics() const override {
7922     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7923   }
7924 };
7925 
7926 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7927   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7928       : AAPotentialValuesImpl(IRP, A) {}
7929 
  /// See AbstractAttribute::updateImpl(...).
7931   ChangeStatus updateImpl(Attributor &A) override {
7932     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7933                      "not be called");
7934   }
7935 
7936   /// See AbstractAttribute::trackStatistics()
7937   void trackStatistics() const override {
7938     STATS_DECLTRACK_FN_ATTR(potential_values)
7939   }
7940 };
7941 
7942 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7943   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7944       : AAPotentialValuesFunction(IRP, A) {}
7945 
7946   /// See AbstractAttribute::trackStatistics()
7947   void trackStatistics() const override {
7948     STATS_DECLTRACK_CS_ATTR(potential_values)
7949   }
7950 };
7951 
7952 struct AAPotentialValuesCallSiteReturned
7953     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7954   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7955       : AACallSiteReturnedFromReturned<AAPotentialValues,
7956                                        AAPotentialValuesImpl>(IRP, A) {}
7957 
7958   /// See AbstractAttribute::trackStatistics()
7959   void trackStatistics() const override {
7960     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7961   }
7962 };
7963 
7964 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7965   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7966       : AAPotentialValuesFloating(IRP, A) {}
7967 
7968   /// See AbstractAttribute::initialize(..).
7969   void initialize(Attributor &A) override {
7970     Value &V = getAssociatedValue();
7971 
7972     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7973       unionAssumed(C->getValue());
7974       indicateOptimisticFixpoint();
7975       return;
7976     }
7977 
7978     if (isa<UndefValue>(&V)) {
7979       unionAssumedWithUndef();
7980       indicateOptimisticFixpoint();
7981       return;
7982     }
7983   }
7984 
7985   /// See AbstractAttribute::updateImpl(...).
7986   ChangeStatus updateImpl(Attributor &A) override {
7987     Value &V = getAssociatedValue();
7988     auto AssumedBefore = getAssumed();
7989     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
7990                                              DepClassTy::REQUIRED);
7991     const auto &S = AA.getAssumed();
7992     unionAssumed(S);
7993     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7994                                          : ChangeStatus::CHANGED;
7995   }
7996 
7997   /// See AbstractAttribute::trackStatistics()
7998   void trackStatistics() const override {
7999     STATS_DECLTRACK_CSARG_ATTR(potential_values)
8000   }
8001 };
8002 
8003 /// ------------------------ NoUndef Attribute ---------------------------------
8004 struct AANoUndefImpl : AANoUndef {
8005   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
8006 
8007   /// See AbstractAttribute::initialize(...).
8008   void initialize(Attributor &A) override {
8009     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
8010       indicateOptimisticFixpoint();
8011       return;
8012     }
8013     Value &V = getAssociatedValue();
8014     if (isa<UndefValue>(V))
8015       indicatePessimisticFixpoint();
8016     else if (isa<FreezeInst>(V))
8017       indicateOptimisticFixpoint();
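    // For returned positions the associated value is the function itself, not
    // the returned values, so the generic query below is skipped there.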
8018     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
8019              isGuaranteedNotToBeUndefOrPoison(&V))
8020       indicateOptimisticFixpoint();
8021     else
8022       AANoUndef::initialize(A);
8023   }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANoUndef::StateType &State) {
    const Value *UseV = U->get();
    const DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (Function *F = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
    }
    State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
    bool TrackUse = false;
    // Track the use for instructions that produce undef or poison whenever at
    // least one of their operands does (casts and GEPs): noundef facts
    // established for their results therefore carry over to the tracked
    // operand.
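    // A minimal illustration (the IR names here are made up): given
    //   %g = getelementptr i8, i8* %p, i64 4
    // the value %g is undef/poison whenever %p is, so if a use of %g in the
    // must-be-executed context implies %g is well-defined, %p must be
    // well-defined as well.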
    if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
      TrackUse = true;
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noundef" : "may-undef-or-poison";
  }

  ChangeStatus manifest(Attributor &A) override {
    // We don't manifest the noundef attribute for dead positions because the
    // values associated with dead positions may later be replaced with undef
    // values.
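    // Purely illustrative (hypothetical IR): if the argument of a call such as
    //   call void @f(i32 %x)
    // is assumed dead, the Attributor may later rewrite the call to pass
    // undef for it; having manifested noundef on that call-site argument
    // would contradict such a replacement.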
    if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
      return ChangeStatus::UNCHANGED;
    // A position whose simplified value does not have any value is
    // considered dead. We don't manifest noundef for such positions for the
    // same reason as above.
    auto &ValueSimplifyAA =
        A.getAAFor<AAValueSimplify>(*this, getIRPosition(), DepClassTy::NONE);
    if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
      return ChangeStatus::UNCHANGED;
    return AANoUndef::manifest(A);
  }
};

struct AANoUndefFloating : public AANoUndefImpl {
  AANoUndefFloating(const IRPosition &IRP, Attributor &A)
      : AANoUndefImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUndefImpl::initialize(A);
    if (!getState().isAtFixpoint())
      if (Instruction *CtxI = getCtxI())
        followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANoUndef::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        T.indicatePessimisticFixpoint();
      } else {
        const AANoUndef::StateType &S =
            static_cast<const AANoUndef::StateType &>(AA.getState());
        T ^= S;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AANoUndef, StateType>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noundef)
  }
};

struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};

struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};

struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};
} // namespace

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }
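
// For illustration only (a comment, not part of the build): for a class like
// AANoUnwind, the generator macro above roughly expands to
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
//     ... // likewise for the other invalid position kinds
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }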

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV