1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 
36 #include <cassert>
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "attributor"
41 
42 static cl::opt<bool> ManifestInternal(
43     "attributor-manifest-internal", cl::Hidden,
44     cl::desc("Manifest Attributor internal string attributes."),
45     cl::init(false));
46 
47 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
48                                        cl::Hidden);
49 
50 template <>
51 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
52 
53 static cl::opt<unsigned, true> MaxPotentialValues(
54     "attributor-max-potential-values", cl::Hidden,
55     cl::desc("Maximum number of potential values to be "
56              "tracked for each position."),
57     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
58     cl::init(7));
59 
60 STATISTIC(NumAAs, "Number of abstract attributes created");
61 
62 // Some helper macros to deal with statistics tracking.
63 //
64 // Usage:
65 // For simple IR attribute tracking overload trackStatistics in the abstract
66 // attribute and choose the right STATS_DECLTRACK_********* macro,
67 // e.g.,:
68 //  void trackStatistics() const override {
69 //    STATS_DECLTRACK_ARG_ATTR(returned)
70 //  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
74 //
75 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
76   ("Number of " #TYPE " marked '" #NAME "'")
77 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
78 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
79 #define STATS_DECL(NAME, TYPE, MSG)                                            \
80   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
81 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
82 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
83   {                                                                            \
84     STATS_DECL(NAME, TYPE, MSG)                                                \
85     STATS_TRACK(NAME, TYPE)                                                    \
86   }
87 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
88   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
89 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
90   STATS_DECLTRACK(NAME, CSArguments,                                           \
91                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
92 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
94 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
95   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
96 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
97   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
98                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
99 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
100   STATS_DECLTRACK(NAME, CSReturn,                                              \
101                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
102 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
103   STATS_DECLTRACK(NAME, Floating,                                              \
104                   ("Number of floating values known to be '" #NAME "'"))
105 
// Specialization of the operator<< for abstract attribute subclasses. This
// disambiguates situations where multiple operators are applicable.
108 namespace llvm {
109 #define PIPE_OPERATOR(CLASS)                                                   \
110   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
111     return OS << static_cast<const AbstractAttribute &>(AA);                   \
112   }
113 
114 PIPE_OPERATOR(AAIsDead)
115 PIPE_OPERATOR(AANoUnwind)
116 PIPE_OPERATOR(AANoSync)
117 PIPE_OPERATOR(AANoRecurse)
118 PIPE_OPERATOR(AAWillReturn)
119 PIPE_OPERATOR(AANoReturn)
120 PIPE_OPERATOR(AAReturnedValues)
121 PIPE_OPERATOR(AANonNull)
122 PIPE_OPERATOR(AANoAlias)
123 PIPE_OPERATOR(AADereferenceable)
124 PIPE_OPERATOR(AAAlign)
125 PIPE_OPERATOR(AANoCapture)
126 PIPE_OPERATOR(AAValueSimplify)
127 PIPE_OPERATOR(AANoFree)
128 PIPE_OPERATOR(AAHeapToStack)
129 PIPE_OPERATOR(AAReachability)
130 PIPE_OPERATOR(AAMemoryBehavior)
131 PIPE_OPERATOR(AAMemoryLocation)
132 PIPE_OPERATOR(AAValueConstantRange)
133 PIPE_OPERATOR(AAPrivatizablePtr)
134 PIPE_OPERATOR(AAUndefinedBehavior)
135 PIPE_OPERATOR(AAPotentialValues)
136 PIPE_OPERATOR(AANoUndef)
137 
138 #undef PIPE_OPERATOR
139 } // namespace llvm
140 
141 namespace {
142 
/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
146 static const Value *getPointerOperand(const Instruction *I,
147                                       bool AllowVolatile) {
148   if (!AllowVolatile && I->isVolatile())
149     return nullptr;
150 
151   if (auto *LI = dyn_cast<LoadInst>(I)) {
152     return LI->getPointerOperand();
153   }
154 
155   if (auto *SI = dyn_cast<StoreInst>(I)) {
156     return SI->getPointerOperand();
157   }
158 
159   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
160     return CXI->getPointerOperand();
161   }
162 
163   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
164     return RMWI->getPointerOperand();
165   }
166 
167   return nullptr;
168 }
169 
/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
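///
/// For example (a sketch assuming a typical data layout where i64 is 8-byte
/// aligned), for %struct = type { i32, { i64, i8 } }, \p Offset 12, and \p Ptr
/// %p of type %struct*, the method first emits
///   %p.0.1.0 = getelementptr %struct, %struct* %p, i32 0, i32 1, i32 0
/// to consume 8 bytes and then adjusts the remaining 4 bytes byte-wise via a
/// bitcast to i8* and a GEP named %p.0.1.0.b4, before casting to \p ResTy.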
178 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
179                                int64_t Offset, IRBuilder<NoFolder> &IRB,
180                                const DataLayout &DL) {
181   assert(Offset >= 0 && "Negative offset not supported yet!");
182   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
183                     << "-bytes as " << *ResTy << "\n");
184 
185   if (Offset) {
186     SmallVector<Value *, 4> Indices;
187     std::string GEPName = Ptr->getName().str() + ".0";
188 
189     // Add 0 index to look through the pointer.
190     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
191            "Offset out of bounds");
192     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
193 
194     Type *Ty = PtrElemTy;
195     do {
196       auto *STy = dyn_cast<StructType>(Ty);
197       if (!STy)
198         // Non-aggregate type, we cast and make byte-wise progress now.
199         break;
200 
201       const StructLayout *SL = DL.getStructLayout(STy);
202       if (int64_t(SL->getSizeInBytes()) < Offset)
203         break;
204 
205       uint64_t Idx = SL->getElementContainingOffset(Offset);
206       assert(Idx < STy->getNumElements() && "Offset calculation error!");
207       uint64_t Rem = Offset - SL->getElementOffset(Idx);
208       Ty = STy->getElementType(Idx);
209 
210       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
211                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
212 
213       GEPName += "." + std::to_string(Idx);
214       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
215       Offset = Rem;
216     } while (Offset);
217 
218     // Create a GEP for the indices collected above.
219     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
220 
221     // If an offset is left we use byte-wise adjustment.
222     if (Offset) {
223       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
224       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
225                           GEPName + ".b" + Twine(Offset));
226     }
227   }
228 
229   // Ensure the result has the requested type.
230   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
231 
232   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
233   return Ptr;
234 }
235 
236 /// Recursively visit all values that might become \p IRP at some point. This
237 /// will be done by looking through cast instructions, selects, phis, and calls
238 /// with the "returned" attribute. Once we cannot look through the value any
239 /// further, the callback \p VisitValueCB is invoked and passed the current
240 /// value, the \p State, and a flag to indicate if we stripped anything.
241 /// Stripped means that we unpacked the value associated with \p IRP at least
242 /// once. Note that the value used for the callback may still be the value
243 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
244 /// we will never visit more values than specified by \p MaxValues.
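///
/// For example (LLVM IR), starting the traversal at %sel in
///   %sel = select i1 %c, i32 %x, i32 %y
/// visits %x and %y; for
///   %phi = phi i32 [ %a, %bb0 ], [ %b, %bb1 ]
/// it visits %a and %b but skips operands whose incoming block is assumed
/// dead.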
245 template <typename AAType, typename StateTy>
246 static bool genericValueTraversal(
247     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
248     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
249         VisitValueCB,
250     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
251     function_ref<Value *(Value *)> StripCB = nullptr) {
252 
253   const AAIsDead *LivenessAA = nullptr;
254   if (IRP.getAnchorScope())
255     LivenessAA = &A.getAAFor<AAIsDead>(
256         QueryingAA,
257         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
258         DepClassTy::NONE);
259   bool AnyDead = false;
260 
261   using Item = std::pair<Value *, const Instruction *>;
262   SmallSet<Item, 16> Visited;
263   SmallVector<Item, 16> Worklist;
264   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
265 
266   int Iteration = 0;
267   do {
268     Item I = Worklist.pop_back_val();
269     Value *V = I.first;
270     CtxI = I.second;
271     if (StripCB)
272       V = StripCB(V);
273 
274     // Check if we should process the current value. To prevent endless
275     // recursion keep a record of the values we followed!
276     if (!Visited.insert(I).second)
277       continue;
278 
279     // Make sure we limit the compile time for complex expressions.
280     if (Iteration++ >= MaxValues)
281       return false;
282 
    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer, as stripPointerCasts only works on pointers.
285     Value *NewV = nullptr;
286     if (V->getType()->isPointerTy()) {
287       NewV = V->stripPointerCasts();
288     } else {
289       auto *CB = dyn_cast<CallBase>(V);
290       if (CB && CB->getCalledFunction()) {
291         for (Argument &Arg : CB->getCalledFunction()->args())
292           if (Arg.hasReturnedAttr()) {
293             NewV = CB->getArgOperand(Arg.getArgNo());
294             break;
295           }
296       }
297     }
298     if (NewV && NewV != V) {
299       Worklist.push_back({NewV, CtxI});
300       continue;
301     }
302 
303     // Look through select instructions, visit both potential values.
304     if (auto *SI = dyn_cast<SelectInst>(V)) {
305       Worklist.push_back({SI->getTrueValue(), CtxI});
306       Worklist.push_back({SI->getFalseValue(), CtxI});
307       continue;
308     }
309 
310     // Look through phi nodes, visit all live operands.
311     if (auto *PHI = dyn_cast<PHINode>(V)) {
312       assert(LivenessAA &&
313              "Expected liveness in the presence of instructions!");
314       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
315         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
316         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
317                             LivenessAA,
318                             /* CheckBBLivenessOnly */ true)) {
319           AnyDead = true;
320           continue;
321         }
322         Worklist.push_back(
323             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
324       }
325       continue;
326     }
327 
328     if (UseValueSimplify && !isa<Constant>(V)) {
329       bool UsedAssumedInformation = false;
330       Optional<Constant *> C =
331           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
332       if (!C.hasValue())
333         continue;
334       if (Value *NewV = C.getValue()) {
335         Worklist.push_back({NewV, CtxI});
336         continue;
337       }
338     }
339 
340     // Once a leaf is reached we inform the user through the callback.
341     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
342       return false;
343   } while (!Worklist.empty());
344 
  // If we actually used liveness information, we have to record a dependence.
346   if (AnyDead)
347     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
348 
349   // All values have been visited.
350   return true;
351 }
352 
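/// Strip (look through) constant-offset GEPs and casts from \p Val and
/// accumulate a minimal byte offset into \p Offset. Variable GEP indices are
/// handled via AAValueConstantRange: the signed minimum of the index range is
/// used, so \p Offset is a lower bound on the actual offset. For example, for
///   %g = getelementptr inbounds i32, i32* %p, i64 %i
/// with %i known to be in the range [4, 10), 16 bytes (4 * sizeof(i32)) are
/// accumulated.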
353 const Value *stripAndAccumulateMinimalOffsets(
354     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
355     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
356     bool UseAssumed = false) {
357 
358   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
359     const IRPosition &Pos = IRPosition::value(V);
360     // Only track dependence if we are going to use the assumed info.
361     const AAValueConstantRange &ValueConstantRangeAA =
362         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
363                                          UseAssumed ? DepClassTy::OPTIONAL
364                                                     : DepClassTy::NONE);
365     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
366                                      : ValueConstantRangeAA.getKnown();
367     // We can only use the lower part of the range because the upper part can
368     // be higher than what the value can really be.
369     ROffset = Range.getSignedMin();
370     return true;
371   };
372 
373   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
374                                                 AttributorAnalysis);
375 }
376 
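/// Helper to determine the base pointer of the memory access performed by
/// \p I together with a minimal (lower-bound) byte offset, using
/// stripAndAccumulateMinimalOffsets above for variable GEP indices.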
377 static const Value *getMinimalBaseOfAccsesPointerOperand(
378     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
379     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
380   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
381   if (!Ptr)
382     return nullptr;
383   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
384   const Value *Base = stripAndAccumulateMinimalOffsets(
385       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
386 
387   BytesOffset = OffsetAPInt.getSExtValue();
388   return Base;
389 }
390 
391 static const Value *
392 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
393                                      const DataLayout &DL,
394                                      bool AllowNonInbounds = false) {
395   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
396   if (!Ptr)
397     return nullptr;
398 
399   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
400                                           AllowNonInbounds);
401 }
402 
/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, an
/// update is required to be run again).
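///
/// A typical use clamps a call site state by the corresponding callee state,
/// e.g. (as done by the call site attributes below):
///   auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
///   return clampStateAndIndicateChange(getState(), FnAA.getState());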
406 template <typename StateType>
407 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
408   auto Assumed = S.getAssumed();
409   S ^= R;
410   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
411                                    : ChangeStatus::CHANGED;
412 }
413 
414 /// Clamp the information known for all returned values of a function
415 /// (identified by \p QueryingAA) into \p S.
416 template <typename AAType, typename StateType = typename AAType::StateType>
417 static void clampReturnedValueStates(
418     Attributor &A, const AAType &QueryingAA, StateType &S,
419     const IRPosition::CallBaseContext *CBContext = nullptr) {
420   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
421                     << QueryingAA << " into " << S << "\n");
422 
423   assert((QueryingAA.getIRPosition().getPositionKind() ==
424               IRPosition::IRP_RETURNED ||
425           QueryingAA.getIRPosition().getPositionKind() ==
426               IRPosition::IRP_CALL_SITE_RETURNED) &&
427          "Can only clamp returned value states for a function returned or call "
428          "site returned position!");
429 
  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all the return values
  // there are.
432   Optional<StateType> T;
433 
434   // Callback for each possibly returned value.
435   auto CheckReturnValue = [&](Value &RV) -> bool {
436     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
437     const AAType &AA =
438         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
439     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
440                       << " @ " << RVPos << "\n");
441     const StateType &AAS = AA.getState();
442     if (T.hasValue())
443       *T &= AAS;
444     else
445       T = AAS;
446     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
447                       << "\n");
448     return T->isValidState();
449   };
450 
451   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
452     S.indicatePessimisticFixpoint();
453   else if (T.hasValue())
454     S ^= *T;
455 }
456 
457 /// Helper class for generic deduction: return value -> returned position.
458 template <typename AAType, typename BaseType,
459           typename StateType = typename BaseType::StateType,
460           bool PropagateCallBaseContext = false>
461 struct AAReturnedFromReturnedValues : public BaseType {
462   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
463       : BaseType(IRP, A) {}
464 
465   /// See AbstractAttribute::updateImpl(...).
466   ChangeStatus updateImpl(Attributor &A) override {
467     StateType S(StateType::getBestState(this->getState()));
468     clampReturnedValueStates<AAType, StateType>(
469         A, *this, S,
470         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
473     return clampStateAndIndicateChange<StateType>(this->getState(), S);
474   }
475 };
476 
477 /// Clamp the information known at all call sites for a given argument
478 /// (identified by \p QueryingAA) into \p S.
479 template <typename AAType, typename StateType = typename AAType::StateType>
480 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
481                                         StateType &S) {
482   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
483                     << QueryingAA << " into " << S << "\n");
484 
485   assert(QueryingAA.getIRPosition().getPositionKind() ==
486              IRPosition::IRP_ARGUMENT &&
487          "Can only clamp call site argument states for an argument position!");
488 
  // Use an optional state as there might not be any call sites and we want to
  // join (IntegerState::operator&) the states of all the call site arguments
  // there are.
491   Optional<StateType> T;
492 
493   // The argument number which is also the call site argument number.
494   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
495 
496   auto CallSiteCheck = [&](AbstractCallSite ACS) {
497     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
500     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
501       return false;
502 
503     const AAType &AA =
504         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
505     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
506                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
507     const StateType &AAS = AA.getState();
508     if (T.hasValue())
509       *T &= AAS;
510     else
511       T = AAS;
512     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
513                       << "\n");
514     return T->isValidState();
515   };
516 
517   bool AllCallSitesKnown;
518   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
519                               AllCallSitesKnown))
520     S.indicatePessimisticFixpoint();
521   else if (T.hasValue())
522     S ^= *T;
523 }
524 
525 /// This function is the bridge between argument position and the call base
526 /// context.
527 template <typename AAType, typename BaseType,
528           typename StateType = typename AAType::StateType>
529 bool getArgumentStateFromCallBaseContext(Attributor &A,
530                                          BaseType &QueryingAttribute,
531                                          IRPosition &Pos, StateType &State) {
532   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
533          "Expected an 'argument' position !");
534   const CallBase *CBContext = Pos.getCallBaseContext();
535   if (!CBContext)
536     return false;
537 
538   int ArgNo = Pos.getCallSiteArgNo();
539   assert(ArgNo >= 0 && "Invalid Arg No!");
540 
541   const auto &AA = A.getAAFor<AAType>(
542       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
543       DepClassTy::REQUIRED);
544   const StateType &CBArgumentState =
545       static_cast<const StateType &>(AA.getState());
546 
547   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
548                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
549                     << "\n");
550 
551   // NOTE: If we want to do call site grouping it should happen here.
552   State ^= CBArgumentState;
553   return true;
554 }
555 
556 /// Helper class for generic deduction: call site argument -> argument position.
557 template <typename AAType, typename BaseType,
558           typename StateType = typename AAType::StateType,
559           bool BridgeCallBaseContext = false>
560 struct AAArgumentFromCallSiteArguments : public BaseType {
561   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
562       : BaseType(IRP, A) {}
563 
564   /// See AbstractAttribute::updateImpl(...).
565   ChangeStatus updateImpl(Attributor &A) override {
566     StateType S = StateType::getBestState(this->getState());
567 
568     if (BridgeCallBaseContext) {
569       bool Success =
570           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
571               A, *this, this->getIRPosition(), S);
572       if (Success)
573         return clampStateAndIndicateChange<StateType>(this->getState(), S);
574     }
575     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
576 
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
579     return clampStateAndIndicateChange<StateType>(this->getState(), S);
580   }
581 };
582 
583 /// Helper class for generic replication: function returned -> cs returned.
584 template <typename AAType, typename BaseType,
585           typename StateType = typename BaseType::StateType,
586           bool IntroduceCallBaseContext = false>
587 struct AACallSiteReturnedFromReturned : public BaseType {
588   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
589       : BaseType(IRP, A) {}
590 
591   /// See AbstractAttribute::updateImpl(...).
592   ChangeStatus updateImpl(Attributor &A) override {
593     assert(this->getIRPosition().getPositionKind() ==
594                IRPosition::IRP_CALL_SITE_RETURNED &&
595            "Can only wrap function returned positions for call site returned "
596            "positions!");
597     auto &S = this->getState();
598 
599     const Function *AssociatedFunction =
600         this->getIRPosition().getAssociatedFunction();
601     if (!AssociatedFunction)
602       return S.indicatePessimisticFixpoint();
603 
604     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
605     if (IntroduceCallBaseContext)
606       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
607                         << CBContext << "\n");
608 
609     IRPosition FnPos = IRPosition::returned(
610         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
611     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
612     return clampStateAndIndicateChange(S, AA.getState());
613   }
614 };
615 
616 /// Helper function to accumulate uses.
617 template <class AAType, typename StateType = typename AAType::StateType>
618 static void followUsesInContext(AAType &AA, Attributor &A,
619                                 MustBeExecutedContextExplorer &Explorer,
620                                 const Instruction *CtxI,
621                                 SetVector<const Use *> &Uses,
622                                 StateType &State) {
623   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
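  // Iterate by index since \p Uses may grow while transitive uses are added.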
624   for (unsigned u = 0; u < Uses.size(); ++u) {
625     const Use *U = Uses[u];
626     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
627       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
628       if (Found && AA.followUseInMBEC(A, U, UserI, State))
629         for (const Use &Us : UserI->uses())
630           Uses.insert(&Us);
631     }
632   }
633 }
634 
635 /// Use the must-be-executed-context around \p I to add information into \p S.
636 /// The AAType class is required to have `followUseInMBEC` method with the
637 /// following signature and behaviour:
638 ///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of \p U.
/// State - The state to be updated.
642 /// Returns true if the value should be tracked transitively.
643 ///
644 template <class AAType, typename StateType = typename AAType::StateType>
645 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
646                              Instruction &CtxI) {
647 
648   // Container for (transitive) uses of the associated value.
649   SetVector<const Use *> Uses;
650   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
651     Uses.insert(&U);
652 
653   MustBeExecutedContextExplorer &Explorer =
654       A.getInfoCache().getMustBeExecutedContextExplorer();
655 
656   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
657 
658   if (S.isAtFixpoint())
659     return;
660 
661   SmallVector<const BranchInst *, 4> BrInsts;
662   auto Pred = [&](const Instruction *I) {
663     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
664       if (Br->isConditional())
665         BrInsts.push_back(Br);
666     return true;
667   };
668 
669   // Here, accumulate conditional branch instructions in the context. We
670   // explore the child paths and collect the known states. The disjunction of
671   // those states can be merged to its own state. Let ParentState_i be a state
672   // to indicate the known information for an i-th branch instruction in the
673   // context. ChildStates are created for its successors respectively.
674   //
675   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
676   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
677   //      ...
678   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
679   //
680   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
681   //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
686   //    if(a)
687   //      if (b) {
688   //        *ptr = 0;
689   //      } else {
690   //        *ptr = 1;
691   //      }
692   //    else {
693   //      if (b) {
694   //        *ptr = 0;
695   //      } else {
696   //        *ptr = 1;
697   //      }
698   //    }
699   // }
700 
701   Explorer.checkForAllContext(&CtxI, Pred);
702   for (const BranchInst *Br : BrInsts) {
703     StateType ParentState;
704 
705     // The known state of the parent state is a conjunction of children's
706     // known states so it is initialized with a best state.
707     ParentState.indicateOptimisticFixpoint();
708 
709     for (const BasicBlock *BB : Br->successors()) {
710       StateType ChildState;
711 
712       size_t BeforeSize = Uses.size();
713       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
714 
715       // Erase uses which only appear in the child.
716       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
717         It = Uses.erase(It);
718 
719       ParentState &= ChildState;
720     }
721 
722     // Use only known state.
723     S += ParentState;
724   }
725 }
726 
727 /// -----------------------NoUnwind Function Attribute--------------------------
728 
729 struct AANoUnwindImpl : AANoUnwind {
730   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
731 
732   const std::string getAsStr() const override {
733     return getAssumed() ? "nounwind" : "may-unwind";
734   }
735 
736   /// See AbstractAttribute::updateImpl(...).
737   ChangeStatus updateImpl(Attributor &A) override {
738     auto Opcodes = {
739         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
740         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
741         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
742 
743     auto CheckForNoUnwind = [&](Instruction &I) {
744       if (!I.mayThrow())
745         return true;
746 
747       if (const auto *CB = dyn_cast<CallBase>(&I)) {
748         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
749             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
750         return NoUnwindAA.isAssumedNoUnwind();
751       }
752       return false;
753     };
754 
755     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
756       return indicatePessimisticFixpoint();
757 
758     return ChangeStatus::UNCHANGED;
759   }
760 };
761 
762 struct AANoUnwindFunction final : public AANoUnwindImpl {
763   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
764       : AANoUnwindImpl(IRP, A) {}
765 
766   /// See AbstractAttribute::trackStatistics()
767   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
768 };
769 
/// NoUnwind attribute deduction for a call site.
771 struct AANoUnwindCallSite final : AANoUnwindImpl {
772   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
773       : AANoUnwindImpl(IRP, A) {}
774 
775   /// See AbstractAttribute::initialize(...).
776   void initialize(Attributor &A) override {
777     AANoUnwindImpl::initialize(A);
778     Function *F = getAssociatedFunction();
779     if (!F || F->isDeclaration())
780       indicatePessimisticFixpoint();
781   }
782 
783   /// See AbstractAttribute::updateImpl(...).
784   ChangeStatus updateImpl(Attributor &A) override {
785     // TODO: Once we have call site specific value information we can provide
786     //       call site specific liveness information and then it makes
787     //       sense to specialize attributes for call sites arguments instead of
788     //       redirecting requests to the callee argument.
789     Function *F = getAssociatedFunction();
790     const IRPosition &FnPos = IRPosition::function(*F);
791     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
792     return clampStateAndIndicateChange(getState(), FnAA.getState());
793   }
794 
795   /// See AbstractAttribute::trackStatistics()
796   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
797 };
798 
799 /// --------------------- Function Return Values -------------------------------
800 
801 /// "Attribute" that collects all potential returned values and the return
802 /// instructions that they arise from.
803 ///
804 /// If there is a unique returned value R, the manifest method will:
805 ///   - mark R with the "returned" attribute, if R is an argument.
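///
/// For example (LLVM IR), in
///   define i32* @id(i32* %p) { ret i32* %p }
/// the unique returned value is the argument %p, so manifesting yields
///   define i32* @id(i32* returned %p) { ret i32* %p }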
806 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
807 
808   /// Mapping of values potentially returned by the associated function to the
809   /// return instructions that might return them.
810   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
811 
812   /// Mapping to remember the number of returned values for a call site such
813   /// that we can avoid updates if nothing changed.
814   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
815 
816   /// Set of unresolved calls returned by the associated function.
817   SmallSetVector<CallBase *, 4> UnresolvedCalls;
818 
819   /// State flags
820   ///
821   ///{
822   bool IsFixed = false;
823   bool IsValidState = true;
824   ///}
825 
826 public:
827   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
828       : AAReturnedValues(IRP, A) {}
829 
830   /// See AbstractAttribute::initialize(...).
831   void initialize(Attributor &A) override {
832     // Reset the state.
833     IsFixed = false;
834     IsValidState = true;
835     ReturnedValues.clear();
836 
837     Function *F = getAssociatedFunction();
838     if (!F || F->isDeclaration()) {
839       indicatePessimisticFixpoint();
840       return;
841     }
842     assert(!F->getReturnType()->isVoidTy() &&
843            "Did not expect a void return type!");
844 
845     // The map from instruction opcodes to those instructions in the function.
846     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
847 
848     // Look through all arguments, if one is marked as returned we are done.
849     for (Argument &Arg : F->args()) {
850       if (Arg.hasReturnedAttr()) {
851         auto &ReturnInstSet = ReturnedValues[&Arg];
852         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
853           for (Instruction *RI : *Insts)
854             ReturnInstSet.insert(cast<ReturnInst>(RI));
855 
856         indicateOptimisticFixpoint();
857         return;
858       }
859     }
860 
861     if (!A.isFunctionIPOAmendable(*F))
862       indicatePessimisticFixpoint();
863   }
864 
865   /// See AbstractAttribute::manifest(...).
866   ChangeStatus manifest(Attributor &A) override;
867 
868   /// See AbstractAttribute::getState(...).
869   AbstractState &getState() override { return *this; }
870 
871   /// See AbstractAttribute::getState(...).
872   const AbstractState &getState() const override { return *this; }
873 
874   /// See AbstractAttribute::updateImpl(Attributor &A).
875   ChangeStatus updateImpl(Attributor &A) override;
876 
877   llvm::iterator_range<iterator> returned_values() override {
878     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
879   }
880 
881   llvm::iterator_range<const_iterator> returned_values() const override {
882     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
883   }
884 
885   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
886     return UnresolvedCalls;
887   }
888 
889   /// Return the number of potential return values, -1 if unknown.
890   size_t getNumReturnValues() const override {
891     return isValidState() ? ReturnedValues.size() : -1;
892   }
893 
  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return
  /// None.
897   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
898 
899   /// See AbstractState::checkForAllReturnedValues(...).
900   bool checkForAllReturnedValuesAndReturnInsts(
901       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
902       const override;
903 
904   /// Pretty print the attribute similar to the IR representation.
905   const std::string getAsStr() const override;
906 
907   /// See AbstractState::isAtFixpoint().
908   bool isAtFixpoint() const override { return IsFixed; }
909 
910   /// See AbstractState::isValidState().
911   bool isValidState() const override { return IsValidState; }
912 
913   /// See AbstractState::indicateOptimisticFixpoint(...).
914   ChangeStatus indicateOptimisticFixpoint() override {
915     IsFixed = true;
916     return ChangeStatus::UNCHANGED;
917   }
918 
919   ChangeStatus indicatePessimisticFixpoint() override {
920     IsFixed = true;
921     IsValidState = false;
922     return ChangeStatus::CHANGED;
923   }
924 };
925 
926 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
927   ChangeStatus Changed = ChangeStatus::UNCHANGED;
928 
929   // Bookkeeping.
930   assert(isValidState());
931   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
932                   "Number of function with known return values");
933 
934   // Check if we have an assumed unique return value that we could manifest.
935   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
936 
937   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
938     return Changed;
939 
940   // Bookkeeping.
941   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
942                   "Number of function with unique return");
943 
944   // Callback to replace the uses of CB with the constant C.
945   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
946     if (CB.use_empty())
947       return ChangeStatus::UNCHANGED;
948     if (A.changeValueAfterManifest(CB, C))
949       return ChangeStatus::CHANGED;
950     return ChangeStatus::UNCHANGED;
951   };
952 
953   // If the assumed unique return value is an argument, annotate it.
954   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
955     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
956             getAssociatedFunction()->getReturnType())) {
957       getIRPosition() = IRPosition::argument(*UniqueRVArg);
958       Changed = IRAttribute::manifest(A);
959     }
960   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
961     // We can replace the returned value with the unique returned constant.
962     Value &AnchorValue = getAnchorValue();
963     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
964       for (const Use &U : F->uses())
965         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
966           if (CB->isCallee(&U)) {
967             Constant *RVCCast =
968                 CB->getType() == RVC->getType()
969                     ? RVC
970                     : ConstantExpr::getPointerCast(RVC, CB->getType());
971             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
972           }
973     } else {
974       assert(isa<CallBase>(AnchorValue) &&
975              "Expcected a function or call base anchor!");
976       Constant *RVCCast =
977           AnchorValue.getType() == RVC->getType()
978               ? RVC
979               : ConstantExpr::getPointerCast(RVC, AnchorValue.getType());
980       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
981     }
982     if (Changed == ChangeStatus::CHANGED)
983       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
984                       "Number of function returns replaced by constant return");
985   }
986 
987   return Changed;
988 }
989 
990 const std::string AAReturnedValuesImpl::getAsStr() const {
991   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
992          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
993          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
994 }
995 
996 Optional<Value *>
997 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
998   // If checkForAllReturnedValues provides a unique value, ignoring potential
999   // undef values that can also be present, it is assumed to be the actual
1000   // return value and forwarded to the caller of this method. If there are
1001   // multiple, a nullptr is returned indicating there cannot be a unique
1002   // returned value.
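  //
  // For example, a function with the two live returns `ret i32 undef` and
  // `ret i32 %x` has the assumed unique return value %x, while one with
  // `ret i32 %x` and `ret i32 %y` has no unique return value (nullptr).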
1003   Optional<Value *> UniqueRV;
1004 
1005   auto Pred = [&](Value &RV) -> bool {
1006     // If we found a second returned value and neither the current nor the saved
1007     // one is an undef, there is no unique returned value. Undefs are special
1008     // since we can pretend they have any value.
1009     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1010         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1011       UniqueRV = nullptr;
1012       return false;
1013     }
1014 
1015     // Do not overwrite a value with an undef.
1016     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1017       UniqueRV = &RV;
1018 
1019     return true;
1020   };
1021 
1022   if (!A.checkForAllReturnedValues(Pred, *this))
1023     UniqueRV = nullptr;
1024 
1025   return UniqueRV;
1026 }
1027 
1028 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1029     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1030     const {
1031   if (!isValidState())
1032     return false;
1033 
1034   // Check all returned values but ignore call sites as long as we have not
1035   // encountered an overdefined one during an update.
1036   for (auto &It : ReturnedValues) {
1037     Value *RV = It.first;
1038 
1039     CallBase *CB = dyn_cast<CallBase>(RV);
1040     if (CB && !UnresolvedCalls.count(CB))
1041       continue;
1042 
1043     if (!Pred(*RV, It.second))
1044       return false;
1045   }
1046 
1047   return true;
1048 }
1049 
1050 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1051   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1052   bool Changed = false;
1053 
1054   // State used in the value traversals starting in returned values.
1055   struct RVState {
1056     // The map in which we collect return values -> return instrs.
1057     decltype(ReturnedValues) &RetValsMap;
1058     // The flag to indicate a change.
1059     bool &Changed;
1060     // The return instrs we come from.
1061     SmallSetVector<ReturnInst *, 4> RetInsts;
1062   };
1063 
1064   // Callback for a leaf value returned by the associated function.
1065   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1066                          bool) -> bool {
1067     auto Size = RVS.RetValsMap[&Val].size();
1068     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1069     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1070     RVS.Changed |= Inserted;
1071     LLVM_DEBUG({
1072       if (Inserted)
1073         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1074                << " => " << RVS.RetInsts.size() << "\n";
1075     });
1076     return true;
1077   };
1078 
1079   // Helper method to invoke the generic value traversal.
1080   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1081                                 const Instruction *CtxI) {
1082     IRPosition RetValPos = IRPosition::value(RV, getCallBaseContext());
1083     return genericValueTraversal<AAReturnedValues, RVState>(
1084         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1085         /* UseValueSimplify */ false);
1086   };
1087 
  // Callback for all "return instructions" live in the associated function.
1089   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1090     ReturnInst &Ret = cast<ReturnInst>(I);
1091     RVState RVS({ReturnedValues, Changed, {}});
1092     RVS.RetInsts.insert(&Ret);
1093     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1094   };
1095 
  // Start by discovering returned values from all live return instructions in
  // the associated function.
1098   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1099     return indicatePessimisticFixpoint();
1100 
  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it, we keep a record of potential new entries in a
  // separate map, NewRVsMap.
1105   decltype(ReturnedValues) NewRVsMap;
1106 
1107   auto HandleReturnValue = [&](Value *RV,
1108                                SmallSetVector<ReturnInst *, 4> &RIs) {
1109     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1110                       << RIs.size() << " RIs\n");
1111     CallBase *CB = dyn_cast<CallBase>(RV);
1112     if (!CB || UnresolvedCalls.count(CB))
1113       return;
1114 
1115     if (!CB->getCalledFunction()) {
1116       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1117                         << "\n");
1118       UnresolvedCalls.insert(CB);
1119       return;
1120     }
1121 
1122     // TODO: use the function scope once we have call site AAReturnedValues.
1123     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1124         *this, IRPosition::function(*CB->getCalledFunction()),
1125         DepClassTy::REQUIRED);
1126     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1127                       << RetValAA << "\n");
1128 
    // Skip dead ends, that is, if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
1131     if (!RetValAA.getState().isValidState()) {
1132       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1133                         << "\n");
1134       UnresolvedCalls.insert(CB);
1135       return;
1136     }
1137 
1138     // Do not try to learn partial information. If the callee has unresolved
1139     // return values we will treat the call as unresolved/opaque.
1140     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1141     if (!RetValAAUnresolvedCalls.empty()) {
1142       UnresolvedCalls.insert(CB);
1143       return;
1144     }
1145 
    // Now check if we can track transitively returned values. If possible,
    // that is, if all returned values can be represented in the current scope,
    // do so.
1148     bool Unresolved = false;
1149     for (auto &RetValAAIt : RetValAA.returned_values()) {
1150       Value *RetVal = RetValAAIt.first;
1151       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1152           isa<Constant>(RetVal))
1153         continue;
1154       // Anything that did not fit in the above categories cannot be resolved,
1155       // mark the call as unresolved.
1156       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1157                            "cannot be translated: "
1158                         << *RetVal << "\n");
1159       UnresolvedCalls.insert(CB);
1160       Unresolved = true;
1161       break;
1162     }
1163 
1164     if (Unresolved)
1165       return;
1166 
1167     // Now track transitively returned values.
1168     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1169     if (NumRetAA == RetValAA.getNumReturnValues()) {
1170       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1171                            "changed since it was seen last\n");
1172       return;
1173     }
1174     NumRetAA = RetValAA.getNumReturnValues();
1175 
1176     for (auto &RetValAAIt : RetValAA.returned_values()) {
1177       Value *RetVal = RetValAAIt.first;
1178       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1179         // Arguments are mapped to call site operands and we begin the traversal
1180         // again.
1181         bool Unused = false;
1182         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1183         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1184         continue;
1185       }
1186       if (isa<CallBase>(RetVal)) {
1187         // Call sites are resolved by the callee attribute over time, no need to
1188         // do anything for us.
1189         continue;
1190       }
1191       if (isa<Constant>(RetVal)) {
1192         // Constants are valid everywhere, we can simply take them.
1193         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1194         continue;
1195       }
1196     }
1197   };
1198 
1199   for (auto &It : ReturnedValues)
1200     HandleReturnValue(It.first, It.second);
1201 
1202   // Because processing the new information can again lead to new return values
1203   // we have to be careful and iterate until this iteration is complete. The
1204   // idea is that we are in a stable state at the end of an update. All return
1205   // values have been handled and properly categorized. We might not update
1206   // again if we have not requested a non-fix attribute so we cannot "wait" for
1207   // the next update to analyze a new return value.
1208   while (!NewRVsMap.empty()) {
1209     auto It = std::move(NewRVsMap.back());
1210     NewRVsMap.pop_back();
1211 
1212     assert(!It.second.empty() && "Entry does not add anything.");
1213     auto &ReturnInsts = ReturnedValues[It.first];
1214     for (ReturnInst *RI : It.second)
1215       if (ReturnInsts.insert(RI)) {
1216         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1217                           << *It.first << " => " << *RI << "\n");
1218         HandleReturnValue(It.first, ReturnInsts);
1219         Changed = true;
1220       }
1221   }
1222 
1223   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1224   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1225 }
1226 
1227 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1228   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1229       : AAReturnedValuesImpl(IRP, A) {}
1230 
1231   /// See AbstractAttribute::trackStatistics()
1232   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1233 };
1234 
/// Returned values information for a call site.
1236 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1237   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1238       : AAReturnedValuesImpl(IRP, A) {}
1239 
1240   /// See AbstractAttribute::initialize(...).
1241   void initialize(Attributor &A) override {
1242     // TODO: Once we have call site specific value information we can provide
1243     //       call site specific liveness information and then it makes
1244     //       sense to specialize attributes for call sites instead of
1245     //       redirecting requests to the callee.
1246     llvm_unreachable("Abstract attributes for returned values are not "
1247                      "supported for call sites yet!");
1248   }
1249 
1250   /// See AbstractAttribute::updateImpl(...).
1251   ChangeStatus updateImpl(Attributor &A) override {
1252     return indicatePessimisticFixpoint();
1253   }
1254 
1255   /// See AbstractAttribute::trackStatistics()
1256   void trackStatistics() const override {}
1257 };
1258 
1259 /// ------------------------ NoSync Function Attribute -------------------------
1260 
1261 struct AANoSyncImpl : AANoSync {
1262   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1263 
1264   const std::string getAsStr() const override {
1265     return getAssumed() ? "nosync" : "may-sync";
1266   }
1267 
1268   /// See AbstractAttribute::updateImpl(...).
1269   ChangeStatus updateImpl(Attributor &A) override;
1270 
  /// Helper function used to determine whether an instruction is a non-relaxed
  /// atomic, that is, an atomic instruction that does not have unordered or
  /// monotonic ordering.
1274   static bool isNonRelaxedAtomic(Instruction *I);
1275 
  /// Helper function specific for intrinsics which are potentially volatile.
1277   static bool isNoSyncIntrinsic(Instruction *I);
1278 };
1279 
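// For example (LLVM IR), `load atomic i32, i32* %p monotonic, align 4` is
// relaxed and hence not flagged here, while
// `load atomic i32, i32* %p acquire, align 4` is non-relaxed.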
1280 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1281   if (!I->isAtomic())
1282     return false;
1283 
1284   if (auto *FI = dyn_cast<FenceInst>(I))
1285     // All legal orderings for fence are stronger than monotonic.
1286     return FI->getSyncScopeID() != SyncScope::SingleThread;
1287   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1288     // Unordered is not a legal ordering for cmpxchg.
1289     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1290             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1291   }
1292 
1293   AtomicOrdering Ordering;
1294   switch (I->getOpcode()) {
1295   case Instruction::AtomicRMW:
1296     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1297     break;
1298   case Instruction::Store:
1299     Ordering = cast<StoreInst>(I)->getOrdering();
1300     break;
1301   case Instruction::Load:
1302     Ordering = cast<LoadInst>(I)->getOrdering();
1303     break;
1304   default:
1305     llvm_unreachable(
1306         "New atomic operations need to be known in the attributor.");
1307   }
1308 
1309   return (Ordering != AtomicOrdering::Unordered &&
1310           Ordering != AtomicOrdering::Monotonic);
1311 }
1312 
1313 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1314 /// which would be nosync except that they have a volatile flag.  All other
1315 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
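/// For example, a `llvm.memcpy` call is nosync unless its `isvolatile`
/// argument is set, in which case we treat it conservatively here.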
1316 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1317   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1318     return !MI->isVolatile();
1319   return false;
1320 }
1321 
1322 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1323 
1324   auto CheckRWInstForNoSync = [&](Instruction &I) {
1325     /// We are looking for volatile instructions or Non-Relaxed atomics.
1326 
1327     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1328       if (CB->hasFnAttr(Attribute::NoSync))
1329         return true;
1330 
1331       if (isNoSyncIntrinsic(&I))
1332         return true;
1333 
1334       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1335           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1336       return NoSyncAA.isAssumedNoSync();
1337     }
1338 
1339     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1340       return true;
1341 
1342     return false;
1343   };
1344 
1345   auto CheckForNoSync = [&](Instruction &I) {
1346     // At this point we handled all read/write effects and they are all
1347     // nosync, so they can be skipped.
1348     if (I.mayReadOrWriteMemory())
1349       return true;
1350 
    // Non-convergent and readnone imply nosync.
1352     return !cast<CallBase>(I).isConvergent();
1353   };
1354 
1355   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1356       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1357     return indicatePessimisticFixpoint();
1358 
1359   return ChangeStatus::UNCHANGED;
1360 }
1361 
1362 struct AANoSyncFunction final : public AANoSyncImpl {
1363   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1364       : AANoSyncImpl(IRP, A) {}
1365 
1366   /// See AbstractAttribute::trackStatistics()
1367   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1368 };
1369 
/// NoSync attribute deduction for a call site.
1371 struct AANoSyncCallSite final : AANoSyncImpl {
1372   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1373       : AANoSyncImpl(IRP, A) {}
1374 
1375   /// See AbstractAttribute::initialize(...).
1376   void initialize(Attributor &A) override {
1377     AANoSyncImpl::initialize(A);
1378     Function *F = getAssociatedFunction();
1379     if (!F || F->isDeclaration())
1380       indicatePessimisticFixpoint();
1381   }
1382 
1383   /// See AbstractAttribute::updateImpl(...).
1384   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1389     Function *F = getAssociatedFunction();
1390     const IRPosition &FnPos = IRPosition::function(*F);
1391     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1392     return clampStateAndIndicateChange(getState(), FnAA.getState());
1393   }
1394 
1395   /// See AbstractAttribute::trackStatistics()
1396   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1397 };
1398 
1399 /// ------------------------ No-Free Attributes ----------------------------
1400 
1401 struct AANoFreeImpl : public AANoFree {
1402   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1403 
1404   /// See AbstractAttribute::updateImpl(...).
1405   ChangeStatus updateImpl(Attributor &A) override {
1406     auto CheckForNoFree = [&](Instruction &I) {
1407       const auto &CB = cast<CallBase>(I);
1408       if (CB.hasFnAttr(Attribute::NoFree))
1409         return true;
1410 
1411       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1412           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1413       return NoFreeAA.isAssumedNoFree();
1414     };
1415 
1416     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1417       return indicatePessimisticFixpoint();
1418     return ChangeStatus::UNCHANGED;
1419   }
1420 
1421   /// See AbstractAttribute::getAsStr().
1422   const std::string getAsStr() const override {
1423     return getAssumed() ? "nofree" : "may-free";
1424   }
1425 };
1426 
1427 struct AANoFreeFunction final : public AANoFreeImpl {
1428   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1429       : AANoFreeImpl(IRP, A) {}
1430 
1431   /// See AbstractAttribute::trackStatistics()
1432   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1433 };
1434 
/// NoFree attribute deduction for a call site.
1436 struct AANoFreeCallSite final : AANoFreeImpl {
1437   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1438       : AANoFreeImpl(IRP, A) {}
1439 
1440   /// See AbstractAttribute::initialize(...).
1441   void initialize(Attributor &A) override {
1442     AANoFreeImpl::initialize(A);
1443     Function *F = getAssociatedFunction();
1444     if (!F || F->isDeclaration())
1445       indicatePessimisticFixpoint();
1446   }
1447 
1448   /// See AbstractAttribute::updateImpl(...).
1449   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1454     Function *F = getAssociatedFunction();
1455     const IRPosition &FnPos = IRPosition::function(*F);
1456     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1457     return clampStateAndIndicateChange(getState(), FnAA.getState());
1458   }
1459 
1460   /// See AbstractAttribute::trackStatistics()
1461   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1462 };
1463 
1464 /// NoFree attribute for floating values.
1465 struct AANoFreeFloating : AANoFreeImpl {
1466   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1467       : AANoFreeImpl(IRP, A) {}
1468 
1469   /// See AbstractAttribute::trackStatistics()
1470   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1471 
  /// See AbstractAttribute::updateImpl(...).
1473   ChangeStatus updateImpl(Attributor &A) override {
1474     const IRPosition &IRP = getIRPosition();
1475 
1476     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1477         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1478     if (NoFreeAA.isAssumedNoFree())
1479       return ChangeStatus::UNCHANGED;
1480 
1481     Value &AssociatedValue = getIRPosition().getAssociatedValue();
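    // Walk all (transitive) uses of the associated value. Uses in calls are
    // delegated to the matching call site argument position, while GEPs,
    // bitcasts, PHIs, and selects only forward the value and are followed.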
1482     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1483       Instruction *UserI = cast<Instruction>(U.getUser());
1484       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1485         if (CB->isBundleOperand(&U))
1486           return false;
1487         if (!CB->isArgOperand(&U))
1488           return true;
1489         unsigned ArgNo = CB->getArgOperandNo(&U);
1490 
1491         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1492             *this, IRPosition::callsite_argument(*CB, ArgNo),
1493             DepClassTy::REQUIRED);
1494         return NoFreeArg.isAssumedNoFree();
1495       }
1496 
1497       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1498           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1499         Follow = true;
1500         return true;
1501       }
1502       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
1503           isa<ReturnInst>(UserI))
1504         return true;
1505 
1506       // Unknown user.
1507       return false;
1508     };
1509     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1510       return indicatePessimisticFixpoint();
1511 
1512     return ChangeStatus::UNCHANGED;
1513   }
1514 };
1515 
/// NoFree attribute for an argument.
1517 struct AANoFreeArgument final : AANoFreeFloating {
1518   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1519       : AANoFreeFloating(IRP, A) {}
1520 
1521   /// See AbstractAttribute::trackStatistics()
1522   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1523 };
1524 
1525 /// NoFree attribute for call site arguments.
1526 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1527   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1528       : AANoFreeFloating(IRP, A) {}
1529 
1530   /// See AbstractAttribute::updateImpl(...).
1531   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
1536     Argument *Arg = getAssociatedArgument();
1537     if (!Arg)
1538       return indicatePessimisticFixpoint();
1539     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1540     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1541     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1542   }
1543 
1544   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)}
1546 };
1547 
1548 /// NoFree attribute for function return value.
1549 struct AANoFreeReturned final : AANoFreeFloating {
1550   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1551       : AANoFreeFloating(IRP, A) {
1552     llvm_unreachable("NoFree is not applicable to function returns!");
1553   }
1554 
1555   /// See AbstractAttribute::initialize(...).
1556   void initialize(Attributor &A) override {
1557     llvm_unreachable("NoFree is not applicable to function returns!");
1558   }
1559 
1560   /// See AbstractAttribute::updateImpl(...).
1561   ChangeStatus updateImpl(Attributor &A) override {
1562     llvm_unreachable("NoFree is not applicable to function returns!");
1563   }
1564 
1565   /// See AbstractAttribute::trackStatistics()
1566   void trackStatistics() const override {}
1567 };
1568 
1569 /// NoFree attribute deduction for a call site return value.
1570 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1571   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1572       : AANoFreeFloating(IRP, A) {}
1573 
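  // `nofree` cannot be attached to a call site return position (see
  // AANoFreeReturned above), so nothing is manifested; the deduced state is
  // still available to queries from other abstract attributes.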
1574   ChangeStatus manifest(Attributor &A) override {
1575     return ChangeStatus::UNCHANGED;
1576   }
1577   /// See AbstractAttribute::trackStatistics()
1578   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1579 };
1580 
1581 /// ------------------------ NonNull Argument Attribute ------------------------
1582 static int64_t getKnownNonNullAndDerefBytesForUse(
1583     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1584     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1585   TrackUse = false;
1586 
1587   const Value *UseV = U->get();
1588   if (!UseV->getType()->isPointerTy())
1589     return 0;
1590 
1591   // We need to follow common pointer manipulation uses to the accesses they
1592   // feed into. We can try to be smart to avoid looking through things we do not
1593   // like for now, e.g., non-inbounds GEPs.
1594   if (isa<CastInst>(I)) {
1595     TrackUse = true;
1596     return 0;
1597   }
1598 
1599   if (isa<GetElementPtrInst>(I)) {
1600     TrackUse = true;
1601     return 0;
1602   }
1603 
1604   Type *PtrTy = UseV->getType();
1605   const Function *F = I->getFunction();
1606   bool NullPointerIsDefined =
1607       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1608   const DataLayout &DL = A.getInfoCache().getDL();
1609   if (const auto *CB = dyn_cast<CallBase>(I)) {
1610     if (CB->isBundleOperand(U)) {
1611       if (RetainedKnowledge RK = getKnowledgeFromUse(
1612               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1613         IsNonNull |=
1614             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1615         return RK.ArgValue;
1616       }
1617       return 0;
1618     }
1619 
1620     if (CB->isCallee(U)) {
1621       IsNonNull |= !NullPointerIsDefined;
1622       return 0;
1623     }
1624 
1625     unsigned ArgNo = CB->getArgOperandNo(U);
1626     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1627     // As long as we only use known information there is no need to track
1628     // dependences here.
1629     auto &DerefAA =
1630         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1631     IsNonNull |= DerefAA.isKnownNonNull();
1632     return DerefAA.getKnownDereferenceableBytes();
1633   }
1634 
1635   int64_t Offset;
1636   const Value *Base =
1637       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1638   if (Base) {
1639     if (Base == &AssociatedValue &&
1640         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1641       int64_t DerefBytes =
1642           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1643 
1644       IsNonNull |= !NullPointerIsDefined;
1645       return std::max(int64_t(0), DerefBytes);
1646     }
1647   }
1648 
  // Corner case when the offset is 0.
1650   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1651                                               /*AllowNonInbounds*/ true);
1652   if (Base) {
1653     if (Offset == 0 && Base == &AssociatedValue &&
1654         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1655       int64_t DerefBytes =
1656           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1657       IsNonNull |= !NullPointerIsDefined;
1658       return std::max(int64_t(0), DerefBytes);
1659     }
1660   }
1661 
1662   return 0;
1663 }
1664 
1665 struct AANonNullImpl : AANonNull {
1666   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1667       : AANonNull(IRP, A),
1668         NullIsDefined(NullPointerIsDefined(
1669             getAnchorScope(),
1670             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1671 
1672   /// See AbstractAttribute::initialize(...).
1673   void initialize(Attributor &A) override {
1674     Value &V = getAssociatedValue();
1675     if (!NullIsDefined &&
1676         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1677                 /* IgnoreSubsumingPositions */ false, &A)) {
1678       indicateOptimisticFixpoint();
1679       return;
1680     }
1681 
1682     if (isa<ConstantPointerNull>(V)) {
1683       indicatePessimisticFixpoint();
1684       return;
1685     }
1686 
1687     AANonNull::initialize(A);
1688 
1689     bool CanBeNull, CanBeFreed;
1690     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
1691                                          CanBeFreed)) {
1692       if (!CanBeNull) {
1693         indicateOptimisticFixpoint();
1694         return;
1695       }
1696     }
1697 
1698     if (isa<GlobalValue>(&getAssociatedValue())) {
1699       indicatePessimisticFixpoint();
1700       return;
1701     }
1702 
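    // Use accesses in the must-be-executed context of the value to derive
    // known nonnull bits, see followUsesInMBEC and
    // getKnownNonNullAndDerefBytesForUse.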
1703     if (Instruction *CtxI = getCtxI())
1704       followUsesInMBEC(*this, A, getState(), *CtxI);
1705   }
1706 
1707   /// See followUsesInMBEC
1708   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1709                        AANonNull::StateType &State) {
1710     bool IsNonNull = false;
1711     bool TrackUse = false;
1712     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1713                                        IsNonNull, TrackUse);
1714     State.setKnown(IsNonNull);
1715     return TrackUse;
1716   }
1717 
1718   /// See AbstractAttribute::getAsStr().
1719   const std::string getAsStr() const override {
1720     return getAssumed() ? "nonnull" : "may-null";
1721   }
1722 
1723   /// Flag to determine if the underlying value can be null and still allow
1724   /// valid accesses.
1725   const bool NullIsDefined;
1726 };
1727 
1728 /// NonNull attribute for a floating value.
1729 struct AANonNullFloating : public AANonNullImpl {
1730   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1731       : AANonNullImpl(IRP, A) {}
1732 
1733   /// See AbstractAttribute::updateImpl(...).
1734   ChangeStatus updateImpl(Attributor &A) override {
1735     const DataLayout &DL = A.getDataLayout();
1736 
1737     DominatorTree *DT = nullptr;
1738     AssumptionCache *AC = nullptr;
1739     InformationCache &InfoCache = A.getInfoCache();
1740     if (const Function *Fn = getAnchorScope()) {
1741       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1742       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1743     }
1744 
1745     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1746                             AANonNull::StateType &T, bool Stripped) -> bool {
1747       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1748                                              DepClassTy::REQUIRED);
1749       if (!Stripped && this == &AA) {
1750         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1751           T.indicatePessimisticFixpoint();
1752       } else {
1753         // Use abstract attribute information.
1754         const AANonNull::StateType &NS = AA.getState();
1755         T ^= NS;
1756       }
1757       return T.isValidState();
1758     };
1759 
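    // Merge the nonnull states of all values the associated value may be
    // derived from, as visited by genericValueTraversal.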
1760     StateType T;
1761     if (!genericValueTraversal<AANonNull, StateType>(
1762             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1763       return indicatePessimisticFixpoint();
1764 
1765     return clampStateAndIndicateChange(getState(), T);
1766   }
1767 
1768   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
1770 };
1771 
1772 /// NonNull attribute for function return value.
1773 struct AANonNullReturned final
1774     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1775   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1776       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1777 
1778   /// See AbstractAttribute::getAsStr().
1779   const std::string getAsStr() const override {
1780     return getAssumed() ? "nonnull" : "may-null";
1781   }
1782 
1783   /// See AbstractAttribute::trackStatistics()
1784   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1785 };
1786 
1787 /// NonNull attribute for function argument.
1788 struct AANonNullArgument final
1789     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1790   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1791       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1792 
1793   /// See AbstractAttribute::trackStatistics()
1794   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1795 };
1796 
1797 struct AANonNullCallSiteArgument final : AANonNullFloating {
1798   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1799       : AANonNullFloating(IRP, A) {}
1800 
1801   /// See AbstractAttribute::trackStatistics()
1802   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1803 };
1804 
1805 /// NonNull attribute for a call site return position.
1806 struct AANonNullCallSiteReturned final
1807     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1808   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1809       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1810 
1811   /// See AbstractAttribute::trackStatistics()
1812   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1813 };
1814 
1815 /// ------------------------ No-Recurse Attributes ----------------------------
1816 
1817 struct AANoRecurseImpl : public AANoRecurse {
1818   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1819 
1820   /// See AbstractAttribute::getAsStr()
1821   const std::string getAsStr() const override {
1822     return getAssumed() ? "norecurse" : "may-recurse";
1823   }
1824 };
1825 
1826 struct AANoRecurseFunction final : AANoRecurseImpl {
1827   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1828       : AANoRecurseImpl(IRP, A) {}
1829 
1830   /// See AbstractAttribute::initialize(...).
1831   void initialize(Attributor &A) override {
1832     AANoRecurseImpl::initialize(A);
1833     if (const Function *F = getAnchorScope())
1834       if (A.getInfoCache().getSccSize(*F) != 1)
1835         indicatePessimisticFixpoint();
1836   }
1837 
1838   /// See AbstractAttribute::updateImpl(...).
1839   ChangeStatus updateImpl(Attributor &A) override {
1840 
1841     // If all live call sites are known to be no-recurse, we are as well.
1842     auto CallSitePred = [&](AbstractCallSite ACS) {
1843       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1844           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1845           DepClassTy::NONE);
1846       return NoRecurseAA.isKnownNoRecurse();
1847     };
1848     bool AllCallSitesKnown;
1849     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1850       // If we know all call sites and all are known no-recurse, we are done.
1851       // If all known call sites, which might not be all that exist, are known
1852       // to be no-recurse, we are not done but we can continue to assume
1853       // no-recurse. If one of the call sites we have not visited will become
1854       // live, another update is triggered.
1855       if (AllCallSitesKnown)
1856         indicateOptimisticFixpoint();
1857       return ChangeStatus::UNCHANGED;
1858     }
1859 
1860     // If the above check does not hold anymore we look at the calls.
1861     auto CheckForNoRecurse = [&](Instruction &I) {
1862       const auto &CB = cast<CallBase>(I);
1863       if (CB.hasFnAttr(Attribute::NoRecurse))
1864         return true;
1865 
1866       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1867           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1868       if (!NoRecurseAA.isAssumedNoRecurse())
1869         return false;
1870 
      // A direct call to the function itself is recursion.
1872       if (CB.getCalledFunction() == getAnchorScope())
1873         return false;
1874 
1875       return true;
1876     };
1877 
1878     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1879       return indicatePessimisticFixpoint();
1880     return ChangeStatus::UNCHANGED;
1881   }
1882 
1883   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1884 };
1885 
/// NoRecurse attribute deduction for a call site.
1887 struct AANoRecurseCallSite final : AANoRecurseImpl {
1888   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1889       : AANoRecurseImpl(IRP, A) {}
1890 
1891   /// See AbstractAttribute::initialize(...).
1892   void initialize(Attributor &A) override {
1893     AANoRecurseImpl::initialize(A);
1894     Function *F = getAssociatedFunction();
1895     if (!F || F->isDeclaration())
1896       indicatePessimisticFixpoint();
1897   }
1898 
1899   /// See AbstractAttribute::updateImpl(...).
1900   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
1905     Function *F = getAssociatedFunction();
1906     const IRPosition &FnPos = IRPosition::function(*F);
1907     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1908     return clampStateAndIndicateChange(getState(), FnAA.getState());
1909   }
1910 
1911   /// See AbstractAttribute::trackStatistics()
1912   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1913 };
1914 
1915 /// -------------------- Undefined-Behavior Attributes ------------------------
1916 
1917 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1918   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1919       : AAUndefinedBehavior(IRP, A) {}
1920 
1921   /// See AbstractAttribute::updateImpl(...).
1923   ChangeStatus updateImpl(Attributor &A) override {
1924     const size_t UBPrevSize = KnownUBInsts.size();
1925     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1926 
1927     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1928       // Skip instructions that are already saved.
1929       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1930         return true;
1931 
      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // which getPointerOperand() should return to us.
1935       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1936       assert(PtrOp &&
1937              "Expected pointer operand of memory accessing instruction");
1938 
1939       // Either we stopped and the appropriate action was taken,
1940       // or we got back a simplified value to continue.
1941       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1942       if (!SimplifiedPtrOp.hasValue())
1943         return true;
1944       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1945 
      // A memory access through a pointer is considered UB
      // only if the pointer is a constant null value.
      // TODO: Expand this to not only check constant values.
1949       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1950         AssumedNoUBInsts.insert(&I);
1951         return true;
1952       }
1953       const Type *PtrTy = PtrOpVal->getType();
1954 
1955       // Because we only consider instructions inside functions,
1956       // assume that a parent function exists.
1957       const Function *F = I.getFunction();
1958 
      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
1961       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1962         AssumedNoUBInsts.insert(&I);
1963       else
1964         KnownUBInsts.insert(&I);
1965       return true;
1966     };
1967 
1968     auto InspectBrInstForUB = [&](Instruction &I) {
1969       // A conditional branch instruction is considered UB if it has `undef`
1970       // condition.
1971 
1972       // Skip instructions that are already saved.
1973       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1974         return true;
1975 
1976       // We know we have a branch instruction.
1977       auto BrInst = cast<BranchInst>(&I);
1978 
1979       // Unconditional branches are never considered UB.
1980       if (BrInst->isUnconditional())
1981         return true;
1982 
1983       // Either we stopped and the appropriate action was taken,
1984       // or we got back a simplified value to continue.
1985       Optional<Value *> SimplifiedCond =
1986           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1987       if (!SimplifiedCond.hasValue())
1988         return true;
1989       AssumedNoUBInsts.insert(&I);
1990       return true;
1991     };
1992 
1993     auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a call site always causes UB.
1995 
1996       // Skip instructions that are already saved.
1997       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1998         return true;
1999 
2000       // Check nonnull and noundef argument attribute violation for each
2001       // callsite.
2002       CallBase &CB = cast<CallBase>(I);
2003       Function *Callee = CB.getCalledFunction();
2004       if (!Callee)
2005         return true;
2006       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
        // If the current argument is known to be simplified to the null
        // pointer and the corresponding argument position is known to have
        // the nonnull attribute, the argument is poison. Furthermore, if the
        // argument is poison and the position is known to have the noundef
        // attribute, this call site is considered UB.
2012         if (idx >= Callee->arg_size())
2013           break;
2014         Value *ArgVal = CB.getArgOperand(idx);
2015         if (!ArgVal)
2016           continue;
        // Here, we handle three cases.
        //   (1) Not having a value means it is dead. (we can replace the value
        //       with undef)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to a null pointer where it is known to be nonnull.
        //       The argument is a poison value and violates the noundef
        //       attribute.
2023         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2024         auto &NoUndefAA =
2025             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2026         if (!NoUndefAA.isKnownNoUndef())
2027           continue;
2028         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2029             *this, IRPosition::value(*ArgVal), DepClassTy::NONE);
2030         if (!ValueSimplifyAA.isKnown())
2031           continue;
2032         Optional<Value *> SimplifiedVal =
2033             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2034         if (!SimplifiedVal.hasValue() ||
2035             isa<UndefValue>(*SimplifiedVal.getValue())) {
2036           KnownUBInsts.insert(&I);
2037           continue;
2038         }
2039         if (!ArgVal->getType()->isPointerTy() ||
2040             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2041           continue;
2042         auto &NonNullAA =
2043             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2044         if (NonNullAA.isKnownNonNull())
2045           KnownUBInsts.insert(&I);
2046       }
2047       return true;
2048     };
2049 
2050     auto InspectReturnInstForUB =
2051         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
          // Check if a return instruction always causes UB.
          // Note: It is guaranteed that the returned position of the anchor
          //       scope has the noundef attribute when this is called.
2055           //       We also ensure the return position is not "assumed dead"
2056           //       because the returned value was then potentially simplified to
2057           //       `undef` in AAReturnedValues without removing the `noundef`
2058           //       attribute yet.
2059 
          // When the returned position has the noundef attribute, UB occurs
          // in the following cases.
2062           //   (1) Returned value is known to be undef.
2063           //   (2) The value is known to be a null pointer and the returned
2064           //       position has nonnull attribute (because the returned value is
2065           //       poison).
2066           bool FoundUB = false;
2067           if (isa<UndefValue>(V)) {
2068             FoundUB = true;
2069           } else {
2070             if (isa<ConstantPointerNull>(V)) {
2071               auto &NonNullAA = A.getAAFor<AANonNull>(
2072                   *this, IRPosition::returned(*getAnchorScope()),
2073                   DepClassTy::NONE);
2074               if (NonNullAA.isKnownNonNull())
2075                 FoundUB = true;
2076             }
2077           }
2078 
2079           if (FoundUB)
2080             for (ReturnInst *RI : RetInsts)
2081               KnownUBInsts.insert(RI);
2082           return true;
2083         };
2084 
2085     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2086                               {Instruction::Load, Instruction::Store,
2087                                Instruction::AtomicCmpXchg,
2088                                Instruction::AtomicRMW},
2089                               /* CheckBBLivenessOnly */ true);
2090     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2091                               /* CheckBBLivenessOnly */ true);
2092     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2093 
    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
2096     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2097       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2098       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2099         auto &RetPosNoUndefAA =
2100             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2101         if (RetPosNoUndefAA.isKnownNoUndef())
2102           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2103                                                     *this);
2104       }
2105     }
2106 
2107     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2108         UBPrevSize != KnownUBInsts.size())
2109       return ChangeStatus::CHANGED;
2110     return ChangeStatus::UNCHANGED;
2111   }
2112 
2113   bool isKnownToCauseUB(Instruction *I) const override {
2114     return KnownUBInsts.count(I);
2115   }
2116 
2117   bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest of the boilerplate
    // is to ensure that it is one of the instructions we test for UB.
2123 
2124     switch (I->getOpcode()) {
2125     case Instruction::Load:
2126     case Instruction::Store:
2127     case Instruction::AtomicCmpXchg:
2128     case Instruction::AtomicRMW:
2129       return !AssumedNoUBInsts.count(I);
2130     case Instruction::Br: {
2131       auto BrInst = cast<BranchInst>(I);
2132       if (BrInst->isUnconditional())
2133         return false;
2134       return !AssumedNoUBInsts.count(I);
    }
2136     default:
2137       return false;
2138     }
2139     return false;
2140   }
2141 
2142   ChangeStatus manifest(Attributor &A) override {
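    // Turn each instruction known to cause UB into an `unreachable` when the
    // changes are manifested.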
2143     if (KnownUBInsts.empty())
2144       return ChangeStatus::UNCHANGED;
2145     for (Instruction *I : KnownUBInsts)
2146       A.changeToUnreachableAfterManifest(I);
2147     return ChangeStatus::CHANGED;
2148   }
2149 
2150   /// See AbstractAttribute::getAsStr()
2151   const std::string getAsStr() const override {
2152     return getAssumed() ? "undefined-behavior" : "no-ub";
2153   }
2154 
2155   /// Note: The correctness of this analysis depends on the fact that the
2156   /// following 2 sets will stop changing after some point.
2157   /// "Change" here means that their size changes.
2158   /// The size of each set is monotonically increasing
2159   /// (we only add items to them) and it is upper bounded by the number of
2160   /// instructions in the processed function (we can never save more
2161   /// elements in either set than this number). Hence, at some point,
2162   /// they will stop increasing.
2163   /// Consequently, at some point, both sets will have stopped
2164   /// changing, effectively making the analysis reach a fixpoint.
2165 
2166   /// Note: These 2 sets are disjoint and an instruction can be considered
2167   /// one of 3 things:
2168   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2169   ///    the KnownUBInsts set.
2170   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2171   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2173   ///    could not find a reason to assume or prove that it can cause UB,
2174   ///    hence it assumes it doesn't. We have a set for these instructions
2175   ///    so that we don't reprocess them in every update.
2176   ///    Note however that instructions in this set may cause UB.
2177 
2178 protected:
2179   /// A set of all live instructions _known_ to cause UB.
2180   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2181 
2182 private:
2183   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2184   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2185 
  // Should be called on updates in which we process an instruction \p I
  // that depends on a value \p V; one of the following has to happen:
2188   // - If the value is assumed, then stop.
2189   // - If the value is known but undef, then consider it UB.
2190   // - Otherwise, do specific processing with the simplified value.
2191   // We return None in the first 2 cases to signify that an appropriate
2192   // action was taken and the caller should stop.
2193   // Otherwise, we return the simplified value that the caller should
2194   // use for specific processing.
2195   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2196                                          Instruction *I) {
2197     const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2198         *this, IRPosition::value(*V), DepClassTy::REQUIRED);
2199     Optional<Value *> SimplifiedV =
2200         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2201     if (!ValueSimplifyAA.isKnown()) {
2202       // Don't depend on assumed values.
2203       return llvm::None;
2204     }
2205     if (!SimplifiedV.hasValue()) {
2206       // If it is known (which we tested above) but it doesn't have a value,
2207       // then we can assume `undef` and hence the instruction is UB.
2208       KnownUBInsts.insert(I);
2209       return llvm::None;
2210     }
2211     Value *Val = SimplifiedV.getValue();
2212     if (isa<UndefValue>(Val)) {
2213       KnownUBInsts.insert(I);
2214       return llvm::None;
2215     }
2216     return Val;
2217   }
2218 };
2219 
2220 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2221   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2222       : AAUndefinedBehaviorImpl(IRP, A) {}
2223 
2224   /// See AbstractAttribute::trackStatistics()
2225   void trackStatistics() const override {
2226     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2227                "Number of instructions known to have UB");
2228     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2229         KnownUBInsts.size();
2230   }
2231 };
2232 
2233 /// ------------------------ Will-Return Attributes ----------------------------
2234 
// Helper function that checks whether a function has any cycle which we don't
// know to be bounded. Loops with a known maximum trip count are considered
// bounded; any other cycle is not.
2238 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2239   ScalarEvolution *SE =
2240       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2241   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function, we
  // conservatively assume any cycle is unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm to find all maximal
  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
2246   if (!SE || !LI) {
2247     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2248       if (SCCI.hasCycle())
2249         return true;
2250     return false;
2251   }
2252 
2253   // If there's irreducible control, the function may contain non-loop cycles.
2254   if (mayContainIrreducibleControl(F, LI))
2255     return true;
2256 
  // Any loop that does not have a known max trip count is considered an
  // unbounded cycle.
2258   for (auto *L : LI->getLoopsInPreorder()) {
2259     if (!SE->getSmallConstantMaxTripCount(L))
2260       return true;
2261   }
2262   return false;
2263 }
2264 
2265 struct AAWillReturnImpl : public AAWillReturn {
2266   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2267       : AAWillReturn(IRP, A) {}
2268 
2269   /// See AbstractAttribute::initialize(...).
2270   void initialize(Attributor &A) override {
2271     AAWillReturn::initialize(A);
2272 
2273     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2274       indicateOptimisticFixpoint();
2275       return;
2276     }
2277   }
2278 
2279   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
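  /// A `mustprogress` function that is also (assumed) `readonly` cannot make
  /// observable progress through writes or synchronization, so the only way
  /// to satisfy the progress guarantee is to eventually return.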
2280   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2281     // Check for `mustprogress` in the scope and the associated function which
2282     // might be different if this is a call site.
2283     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2284         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2285       return false;
2286 
2287     const auto &MemAA =
2288         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2289     if (!MemAA.isAssumedReadOnly())
2290       return false;
2291     if (KnownOnly && !MemAA.isKnownReadOnly())
2292       return false;
2293     if (!MemAA.isKnownReadOnly())
2294       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2295 
2296     return true;
2297   }
2298 
2299   /// See AbstractAttribute::updateImpl(...).
2300   ChangeStatus updateImpl(Attributor &A) override {
2301     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2302       return ChangeStatus::UNCHANGED;
2303 
2304     auto CheckForWillReturn = [&](Instruction &I) {
2305       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2306       const auto &WillReturnAA =
2307           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2308       if (WillReturnAA.isKnownWillReturn())
2309         return true;
2310       if (!WillReturnAA.isAssumedWillReturn())
2311         return false;
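      // A callee that is only assumed (not known) to return might still
      // recurse into the caller; additionally require norecurse so such a
      // cycle cannot run forever.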
2312       const auto &NoRecurseAA =
2313           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2314       return NoRecurseAA.isAssumedNoRecurse();
2315     };
2316 
2317     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2318       return indicatePessimisticFixpoint();
2319 
2320     return ChangeStatus::UNCHANGED;
2321   }
2322 
2323   /// See AbstractAttribute::getAsStr()
2324   const std::string getAsStr() const override {
2325     return getAssumed() ? "willreturn" : "may-noreturn";
2326   }
2327 };
2328 
2329 struct AAWillReturnFunction final : AAWillReturnImpl {
2330   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2331       : AAWillReturnImpl(IRP, A) {}
2332 
2333   /// See AbstractAttribute::initialize(...).
2334   void initialize(Attributor &A) override {
2335     AAWillReturnImpl::initialize(A);
2336 
2337     Function *F = getAnchorScope();
2338     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2339       indicatePessimisticFixpoint();
2340   }
2341 
2342   /// See AbstractAttribute::trackStatistics()
2343   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2344 };
2345 
/// WillReturn attribute deduction for a call site.
2347 struct AAWillReturnCallSite final : AAWillReturnImpl {
2348   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2349       : AAWillReturnImpl(IRP, A) {}
2350 
2351   /// See AbstractAttribute::initialize(...).
2352   void initialize(Attributor &A) override {
2353     AAWillReturnImpl::initialize(A);
2354     Function *F = getAssociatedFunction();
2355     if (!F || !A.isFunctionIPOAmendable(*F))
2356       indicatePessimisticFixpoint();
2357   }
2358 
2359   /// See AbstractAttribute::updateImpl(...).
2360   ChangeStatus updateImpl(Attributor &A) override {
2361     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2362       return ChangeStatus::UNCHANGED;
2363 
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
2368     Function *F = getAssociatedFunction();
2369     const IRPosition &FnPos = IRPosition::function(*F);
2370     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2371     return clampStateAndIndicateChange(getState(), FnAA.getState());
2372   }
2373 
2374   /// See AbstractAttribute::trackStatistics()
2375   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2376 };
2377 
/// ---------------------- AAReachability Attribute ---------------------------
2379 
2380 struct AAReachabilityImpl : AAReachability {
2381   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2382       : AAReachability(IRP, A) {}
2383 
2384   const std::string getAsStr() const override {
2385     // TODO: Return the number of reachable queries.
2386     return "reachable";
2387   }
2388 
2389   /// See AbstractAttribute::initialize(...).
2390   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2391 
2392   /// See AbstractAttribute::updateImpl(...).
2393   ChangeStatus updateImpl(Attributor &A) override {
2394     return indicatePessimisticFixpoint();
2395   }
2396 };
2397 
2398 struct AAReachabilityFunction final : public AAReachabilityImpl {
2399   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2400       : AAReachabilityImpl(IRP, A) {}
2401 
2402   /// See AbstractAttribute::trackStatistics()
2403   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2404 };
2405 
2406 /// ------------------------ NoAlias Argument Attribute ------------------------
2407 
2408 struct AANoAliasImpl : AANoAlias {
2409   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2410     assert(getAssociatedType()->isPointerTy() &&
2411            "Noalias is a pointer attribute");
2412   }
2413 
2414   const std::string getAsStr() const override {
2415     return getAssumed() ? "noalias" : "may-alias";
2416   }
2417 };
2418 
2419 /// NoAlias attribute for a floating value.
2420 struct AANoAliasFloating final : AANoAliasImpl {
2421   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2422       : AANoAliasImpl(IRP, A) {}
2423 
2424   /// See AbstractAttribute::initialize(...).
2425   void initialize(Attributor &A) override {
2426     AANoAliasImpl::initialize(A);
2427     Value *Val = &getAssociatedValue();
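    // Strip a chain of single-use casts to find the underlying value this
    // deduction should look at.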
2428     do {
2429       CastInst *CI = dyn_cast<CastInst>(Val);
2430       if (!CI)
2431         break;
2432       Value *Base = CI->getOperand(0);
2433       if (!Base->hasOneUse())
2434         break;
2435       Val = Base;
2436     } while (true);
2437 
2438     if (!Val->getType()->isPointerTy()) {
2439       indicatePessimisticFixpoint();
2440       return;
2441     }
2442 
2443     if (isa<AllocaInst>(Val))
2444       indicateOptimisticFixpoint();
2445     else if (isa<ConstantPointerNull>(Val) &&
2446              !NullPointerIsDefined(getAnchorScope(),
2447                                    Val->getType()->getPointerAddressSpace()))
2448       indicateOptimisticFixpoint();
2449     else if (Val != &getAssociatedValue()) {
2450       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2451           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2452       if (ValNoAliasAA.isKnownNoAlias())
2453         indicateOptimisticFixpoint();
2454     }
2455   }
2456 
2457   /// See AbstractAttribute::updateImpl(...).
2458   ChangeStatus updateImpl(Attributor &A) override {
2459     // TODO: Implement this.
2460     return indicatePessimisticFixpoint();
2461   }
2462 
2463   /// See AbstractAttribute::trackStatistics()
2464   void trackStatistics() const override {
2465     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2466   }
2467 };
2468 
2469 /// NoAlias attribute for an argument.
2470 struct AANoAliasArgument final
2471     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2472   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2473   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2474 
2475   /// See AbstractAttribute::initialize(...).
2476   void initialize(Attributor &A) override {
2477     Base::initialize(A);
2478     // See callsite argument attribute and callee argument attribute.
2479     if (hasAttr({Attribute::ByVal}))
2480       indicateOptimisticFixpoint();
2481   }
2482 
2483   /// See AbstractAttribute::update(...).
2484   ChangeStatus updateImpl(Attributor &A) override {
2485     // We have to make sure no-alias on the argument does not break
2486     // synchronization when this is a callback argument, see also [1] below.
2487     // If synchronization cannot be affected, we delegate to the base updateImpl
2488     // function, otherwise we give up for now.
2489 
2490     // If the function is no-sync, no-alias cannot break synchronization.
2491     const auto &NoSyncAA =
2492         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2493                              DepClassTy::OPTIONAL);
2494     if (NoSyncAA.isAssumedNoSync())
2495       return Base::updateImpl(A);
2496 
2497     // If the argument is read-only, no-alias cannot break synchronization.
2498     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2499         *this, getIRPosition(), DepClassTy::OPTIONAL);
2500     if (MemBehaviorAA.isAssumedReadOnly())
2501       return Base::updateImpl(A);
2502 
2503     // If the argument is never passed through callbacks, no-alias cannot break
2504     // synchronization.
2505     bool AllCallSitesKnown;
2506     if (A.checkForAllCallSites(
2507             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2508             true, AllCallSitesKnown))
2509       return Base::updateImpl(A);
2510 
2511     // TODO: add no-alias but make sure it doesn't break synchronization by
2512     // introducing fake uses. See:
2513     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2514     //     International Workshop on OpenMP 2018,
2515     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2516 
2517     return indicatePessimisticFixpoint();
2518   }
2519 
2520   /// See AbstractAttribute::trackStatistics()
2521   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2522 };
2523 
2524 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2525   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2526       : AANoAliasImpl(IRP, A) {}
2527 
2528   /// See AbstractAttribute::initialize(...).
2529   void initialize(Attributor &A) override {
2530     // See callsite argument attribute and callee argument attribute.
2531     const auto &CB = cast<CallBase>(getAnchorValue());
2532     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2533       indicateOptimisticFixpoint();
2534     Value &Val = getAssociatedValue();
2535     if (isa<ConstantPointerNull>(Val) &&
2536         !NullPointerIsDefined(getAnchorScope(),
2537                               Val.getType()->getPointerAddressSpace()))
2538       indicateOptimisticFixpoint();
2539   }
2540 
2541   /// Determine if the underlying value may alias with the call site argument
2542   /// \p OtherArgNo of \p ICS (= the underlying call site).
2543   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2544                             const AAMemoryBehavior &MemBehaviorAA,
2545                             const CallBase &CB, unsigned OtherArgNo) {
2546     // We do not need to worry about aliasing with the underlying IRP.
2547     if (this->getCalleeArgNo() == (int)OtherArgNo)
2548       return false;
2549 
2550     // If it is not a pointer or pointer vector we do not alias.
2551     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2552     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2553       return false;
2554 
2555     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2556         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2557 
2558     // If the argument is readnone, there is no read-write aliasing.
2559     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2560       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2561       return false;
2562     }
2563 
2564     // If the argument is readonly and the underlying value is readonly, there
2565     // is no read-write aliasing.
2566     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2567     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2568       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2569       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2570       return false;
2571     }
2572 
2573     // We have to utilize actual alias analysis queries so we need the object.
2574     if (!AAR)
2575       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2576 
2577     // Try to rule it out at the call site.
2578     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2579     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2580                          "callsite arguments: "
2581                       << getAssociatedValue() << " " << *ArgOp << " => "
2582                       << (IsAliasing ? "" : "no-") << "alias \n");
2583 
2584     return IsAliasing;
2585   }
2586 
2587   bool
2588   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2589                                          const AAMemoryBehavior &MemBehaviorAA,
2590                                          const AANoAlias &NoAliasAA) {
2591     // We can deduce "noalias" if the following conditions hold.
2592     // (i)   Associated value is assumed to be noalias in the definition.
2593     // (ii)  Associated value is assumed to be no-capture in all the uses
2594     //       possibly executed before this callsite.
2595     // (iii) There is no other pointer argument which could alias with the
2596     //       value.
2597 
2598     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2599     if (!AssociatedValueIsNoAliasAtDef) {
2600       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2601                         << " is not no-alias at the definition\n");
2602       return false;
2603     }
2604 
2605     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2606 
2607     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2608     const Function *ScopeFn = VIRP.getAnchorScope();
2609     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
    // Check whether the value is captured in the scope using AANoCapture.
    // Look at the CFG and check only uses possibly executed before this
    // call site.
2613     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2614       Instruction *UserI = cast<Instruction>(U.getUser());
2615 
      // If UserI is the current instruction and there is a single potential
      // use of the value in UserI, we allow the use.
2618       // TODO: We should inspect the operands and allow those that cannot alias
2619       //       with the value.
2620       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2621         return true;
2622 
2623       if (ScopeFn) {
2624         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2625             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2626 
2627         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2628           return true;
2629 
2630         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2631           if (CB->isArgOperand(&U)) {
2632 
2633             unsigned ArgNo = CB->getArgOperandNo(&U);
2634 
2635             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2636                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2637                 DepClassTy::OPTIONAL);
2638 
2639             if (NoCaptureAA.isAssumedNoCapture())
2640               return true;
2641           }
2642         }
2643       }
2644 
2645       // For cases which can potentially have more users
2646       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2647           isa<SelectInst>(U)) {
2648         Follow = true;
2649         return true;
2650       }
2651 
2652       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2653       return false;
2654     };
2655 
2656     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2657       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2658         LLVM_DEBUG(
2659             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2660                    << " cannot be noalias as it is potentially captured\n");
2661         return false;
2662       }
2663     }
2664     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2665 
2666     // Check there is no other pointer argument which could alias with the
2667     // value passed at this call site.
2668     // TODO: AbstractCallSite
2669     const auto &CB = cast<CallBase>(getAnchorValue());
2670     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2671          OtherArgNo++)
2672       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2673         return false;
2674 
2675     return true;
2676   }
2677 
2678   /// See AbstractAttribute::updateImpl(...).
2679   ChangeStatus updateImpl(Attributor &A) override {
2680     // If the argument is readnone we are done as there are no accesses via the
2681     // argument.
2682     auto &MemBehaviorAA =
2683         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2684     if (MemBehaviorAA.isAssumedReadNone()) {
2685       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2686       return ChangeStatus::UNCHANGED;
2687     }
2688 
2689     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2690     const auto &NoAliasAA =
2691         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2692 
2693     AAResults *AAR = nullptr;
2694     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2695                                                NoAliasAA)) {
2696       LLVM_DEBUG(
2697           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2698       return ChangeStatus::UNCHANGED;
2699     }
2700 
2701     return indicatePessimisticFixpoint();
2702   }
2703 
2704   /// See AbstractAttribute::trackStatistics()
2705   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2706 };
2707 
2708 /// NoAlias attribute for function return value.
2709 struct AANoAliasReturned final : AANoAliasImpl {
2710   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2711       : AANoAliasImpl(IRP, A) {}
2712 
2713   /// See AbstractAttribute::initialize(...).
2714   void initialize(Attributor &A) override {
2715     AANoAliasImpl::initialize(A);
2716     Function *F = getAssociatedFunction();
2717     if (!F || F->isDeclaration())
2718       indicatePessimisticFixpoint();
2719   }
2720 
2721   /// See AbstractAttribute::updateImpl(...).
2722   virtual ChangeStatus updateImpl(Attributor &A) override {
2723 
2724     auto CheckReturnValue = [&](Value &RV) -> bool {
2725       if (Constant *C = dyn_cast<Constant>(&RV))
2726         if (C->isNullValue() || isa<UndefValue>(C))
2727           return true;
2728 
      // For now, we can only deduce noalias if we have call sites.
      // FIXME: add more support.
2731       if (!isa<CallBase>(&RV))
2732         return false;
2733 
2734       const IRPosition &RVPos = IRPosition::value(RV);
2735       const auto &NoAliasAA =
2736           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2737       if (!NoAliasAA.isAssumedNoAlias())
2738         return false;
2739 
2740       const auto &NoCaptureAA =
2741           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2742       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2743     };
2744 
2745     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2746       return indicatePessimisticFixpoint();
2747 
2748     return ChangeStatus::UNCHANGED;
2749   }
2750 
2751   /// See AbstractAttribute::trackStatistics()
2752   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2753 };
2754 
2755 /// NoAlias attribute deduction for a call site return value.
2756 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2757   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2758       : AANoAliasImpl(IRP, A) {}
2759 
2760   /// See AbstractAttribute::initialize(...).
2761   void initialize(Attributor &A) override {
2762     AANoAliasImpl::initialize(A);
2763     Function *F = getAssociatedFunction();
2764     if (!F || F->isDeclaration())
2765       indicatePessimisticFixpoint();
2766   }
2767 
2768   /// See AbstractAttribute::updateImpl(...).
2769   ChangeStatus updateImpl(Attributor &A) override {
2770     // TODO: Once we have call site specific value information we can provide
2771     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2773     //       redirecting requests to the callee argument.
2774     Function *F = getAssociatedFunction();
2775     const IRPosition &FnPos = IRPosition::returned(*F);
2776     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2777     return clampStateAndIndicateChange(getState(), FnAA.getState());
2778   }
2779 
2780   /// See AbstractAttribute::trackStatistics()
2781   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2782 };
2783 
2784 /// -------------------AAIsDead Function Attribute-----------------------
2785 
2786 struct AAIsDeadValueImpl : public AAIsDead {
2787   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2788 
2789   /// See AAIsDead::isAssumedDead().
2790   bool isAssumedDead() const override { return getAssumed(); }
2791 
2792   /// See AAIsDead::isKnownDead().
2793   bool isKnownDead() const override { return getKnown(); }
2794 
2795   /// See AAIsDead::isAssumedDead(BasicBlock *).
2796   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2797 
2798   /// See AAIsDead::isKnownDead(BasicBlock *).
2799   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2800 
2801   /// See AAIsDead::isAssumedDead(Instruction *I).
2802   bool isAssumedDead(const Instruction *I) const override {
2803     return I == getCtxI() && isAssumedDead();
2804   }
2805 
2806   /// See AAIsDead::isKnownDead(Instruction *I).
2807   bool isKnownDead(const Instruction *I) const override {
2808     return isAssumedDead(I) && getKnown();
2809   }
2810 
2811   /// See AbstractAttribute::getAsStr().
2812   const std::string getAsStr() const override {
2813     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2814   }
2815 
2816   /// Check if all uses are assumed dead.
2817   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2818     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
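    // The predicate rejects every use it is asked about, so the query below
    // succeeds only if all uses are skipped as (assumed) dead.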
2819     // Explicitly set the dependence class to required because we want a long
    // chain of N dependent instructions to be considered live as soon as one
    // is, without going through N update cycles. This is not required for
2822     // correctness.
2823     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2824   }
2825 
2826   /// Determine if \p I is assumed to be side-effect free.
2827   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2828     if (!I || wouldInstructionBeTriviallyDead(I))
2829       return true;
2830 
2831     auto *CB = dyn_cast<CallBase>(I);
2832     if (!CB || isa<IntrinsicInst>(CB))
2833       return false;
2834 
2835     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2836     const auto &NoUnwindAA =
2837         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2838     if (!NoUnwindAA.isAssumedNoUnwind())
2839       return false;
2840     if (!NoUnwindAA.isKnownNoUnwind())
2841       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2842 
2843     const auto &MemBehaviorAA =
2844         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2845     if (MemBehaviorAA.isAssumedReadOnly()) {
2846       if (!MemBehaviorAA.isKnownReadOnly())
2847         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2848       return true;
2849     }
2850     return false;
2851   }
2852 };
2853 
2854 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2855   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2856       : AAIsDeadValueImpl(IRP, A) {}
2857 
2858   /// See AbstractAttribute::initialize(...).
2859   void initialize(Attributor &A) override {
2860     if (isa<UndefValue>(getAssociatedValue())) {
2861       indicatePessimisticFixpoint();
2862       return;
2863     }
2864 
2865     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2866     if (!isAssumedSideEffectFree(A, I))
2867       indicatePessimisticFixpoint();
2868   }
2869 
2870   /// See AbstractAttribute::updateImpl(...).
2871   ChangeStatus updateImpl(Attributor &A) override {
2872     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2873     if (!isAssumedSideEffectFree(A, I))
2874       return indicatePessimisticFixpoint();
2875 
2876     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2877       return indicatePessimisticFixpoint();
2878     return ChangeStatus::UNCHANGED;
2879   }
2880 
2881   /// See AbstractAttribute::manifest(...).
2882   ChangeStatus manifest(Attributor &A) override {
2883     Value &V = getAssociatedValue();
2884     if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we know the users are all dead. We check
      // isAssumedSideEffectFree again because that might no longer hold: the
      // users may be dead while the instruction (e.g., a call) is still
      // needed for its side effects.
2889       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2890         A.deleteAfterManifest(*I);
2891         return ChangeStatus::CHANGED;
2892       }
2893     }
2894     if (V.use_empty())
2895       return ChangeStatus::UNCHANGED;
2896 
2897     bool UsedAssumedInformation = false;
2898     Optional<Constant *> C =
2899         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2900     if (C.hasValue() && C.getValue())
2901       return ChangeStatus::UNCHANGED;
2902 
2903     // Replace the value with undef as it is dead but keep droppable uses around
2904     // as they provide information we don't want to give up on just yet.
2905     UndefValue &UV = *UndefValue::get(V.getType());
2906     bool AnyChange =
        A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
2908     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2909   }
2910 
2911   /// See AbstractAttribute::trackStatistics()
2912   void trackStatistics() const override {
2913     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2914   }
2915 };
2916 
2917 struct AAIsDeadArgument : public AAIsDeadFloating {
2918   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2919       : AAIsDeadFloating(IRP, A) {}
2920 
2921   /// See AbstractAttribute::initialize(...).
2922   void initialize(Attributor &A) override {
2923     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2924       indicatePessimisticFixpoint();
2925   }
2926 
2927   /// See AbstractAttribute::manifest(...).
2928   ChangeStatus manifest(Attributor &A) override {
2929     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2930     Argument &Arg = *getAssociatedArgument();
2931     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2932       if (A.registerFunctionSignatureRewrite(
2933               Arg, /* ReplacementTypes */ {},
2934               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2935               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2936         Arg.dropDroppableUses();
2937         return ChangeStatus::CHANGED;
2938       }
2939     return Changed;
2940   }
2941 
2942   /// See AbstractAttribute::trackStatistics()
2943   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2944 };
2945 
2946 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2947   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2948       : AAIsDeadValueImpl(IRP, A) {}
2949 
2950   /// See AbstractAttribute::initialize(...).
2951   void initialize(Attributor &A) override {
2952     if (isa<UndefValue>(getAssociatedValue()))
2953       indicatePessimisticFixpoint();
2954   }
2955 
2956   /// See AbstractAttribute::updateImpl(...).
2957   ChangeStatus updateImpl(Attributor &A) override {
2958     // TODO: Once we have call site specific value information we can provide
2959     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2961     //       redirecting requests to the callee argument.
2962     Argument *Arg = getAssociatedArgument();
2963     if (!Arg)
2964       return indicatePessimisticFixpoint();
2965     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2966     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2967     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2968   }
2969 
2970   /// See AbstractAttribute::manifest(...).
2971   ChangeStatus manifest(Attributor &A) override {
2972     CallBase &CB = cast<CallBase>(getAnchorValue());
2973     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2974     assert(!isa<UndefValue>(U.get()) &&
2975            "Expected undef values to be filtered out!");
2976     UndefValue &UV = *UndefValue::get(U->getType());
2977     if (A.changeUseAfterManifest(U, UV))
2978       return ChangeStatus::CHANGED;
2979     return ChangeStatus::UNCHANGED;
2980   }
2981 
2982   /// See AbstractAttribute::trackStatistics()
2983   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2984 };
2985 
2986 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2987   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2988       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2989 
2990   /// See AAIsDead::isAssumedDead().
2991   bool isAssumedDead() const override {
2992     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2993   }
2994 
2995   /// See AbstractAttribute::initialize(...).
2996   void initialize(Attributor &A) override {
2997     if (isa<UndefValue>(getAssociatedValue())) {
2998       indicatePessimisticFixpoint();
2999       return;
3000     }
3001 
3002     // We track this separately as a secondary state.
3003     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3004   }
3005 
3006   /// See AbstractAttribute::updateImpl(...).
3007   ChangeStatus updateImpl(Attributor &A) override {
3008     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3009     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3010       IsAssumedSideEffectFree = false;
3011       Changed = ChangeStatus::CHANGED;
3012     }
3013 
3014     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3015       return indicatePessimisticFixpoint();
3016     return Changed;
3017   }
3018 
3019   /// See AbstractAttribute::trackStatistics()
3020   void trackStatistics() const override {
3021     if (IsAssumedSideEffectFree)
3022       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3023     else
3024       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3025   }
3026 
3027   /// See AbstractAttribute::getAsStr().
3028   const std::string getAsStr() const override {
3029     return isAssumedDead()
3030                ? "assumed-dead"
3031                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3032   }
3033 
3034 private:
3035   bool IsAssumedSideEffectFree;
3036 };
3037 
3038 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3039   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3040       : AAIsDeadValueImpl(IRP, A) {}
3041 
3042   /// See AbstractAttribute::updateImpl(...).
3043   ChangeStatus updateImpl(Attributor &A) override {
3044 
3045     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3046                               {Instruction::Ret});
3047 
3048     auto PredForCallSite = [&](AbstractCallSite ACS) {
3049       if (ACS.isCallbackCall() || !ACS.getInstruction())
3050         return false;
3051       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3052     };
3053 
3054     bool AllCallSitesKnown;
3055     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3056                                 AllCallSitesKnown))
3057       return indicatePessimisticFixpoint();
3058 
3059     return ChangeStatus::UNCHANGED;
3060   }
3061 
3062   /// See AbstractAttribute::manifest(...).
3063   ChangeStatus manifest(Attributor &A) override {
3064     // TODO: Rewrite the signature to return void?
3065     bool AnyChange = false;
3066     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3067     auto RetInstPred = [&](Instruction &I) {
3068       ReturnInst &RI = cast<ReturnInst>(I);
3069       if (!isa<UndefValue>(RI.getReturnValue()))
3070         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3071       return true;
3072     };
3073     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3074     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3075   }
3076 
3077   /// See AbstractAttribute::trackStatistics()
3078   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3079 };
3080 
3081 struct AAIsDeadFunction : public AAIsDead {
3082   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3083 
3084   /// See AbstractAttribute::initialize(...).
3085   void initialize(Attributor &A) override {
3086     const Function *F = getAnchorScope();
3087     if (F && !F->isDeclaration()) {
3088       // We only want to compute liveness once. If the function is not part of
3089       // the SCC, skip it.
3090       if (A.isRunOn(*const_cast<Function *>(F))) {
3091         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3092         assumeLive(A, F->getEntryBlock());
3093       } else {
3094         indicatePessimisticFixpoint();
3095       }
3096     }
3097   }
3098 
3099   /// See AbstractAttribute::getAsStr().
3100   const std::string getAsStr() const override {
3101     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3102            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3103            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3104            std::to_string(KnownDeadEnds.size()) + "]";
3105   }
3106 
3107   /// See AbstractAttribute::manifest(...).
3108   ChangeStatus manifest(Attributor &A) override {
3109     assert(getState().isValidState() &&
3110            "Attempted to manifest an invalid state!");
3111 
3112     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3113     Function &F = *getAnchorScope();
3114 
3115     if (AssumedLiveBlocks.empty()) {
3116       A.deleteAfterManifest(F);
3117       return ChangeStatus::CHANGED;
3118     }
3119 
3120     // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
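    // E.g., an invoke of a known nounwind callee can be treated like a call
    // and its unwind destination considered dead, but not if asynchronous
    // exceptions could still transfer control there.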
3123     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3124 
3125     KnownDeadEnds.set_union(ToBeExploredFrom);
3126     for (const Instruction *DeadEndI : KnownDeadEnds) {
3127       auto *CB = dyn_cast<CallBase>(DeadEndI);
3128       if (!CB)
3129         continue;
3130       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3131           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3132       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3133       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3134         continue;
3135 
3136       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3137         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3138       else
3139         A.changeToUnreachableAfterManifest(
3140             const_cast<Instruction *>(DeadEndI->getNextNode()));
3141       HasChanged = ChangeStatus::CHANGED;
3142     }
3143 
3144     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3145     for (BasicBlock &BB : F)
3146       if (!AssumedLiveBlocks.count(&BB)) {
3147         A.deleteAfterManifest(BB);
3148         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3149       }
3150 
3151     return HasChanged;
3152   }
3153 
3154   /// See AbstractAttribute::updateImpl(...).
3155   ChangeStatus updateImpl(Attributor &A) override;
3156 
3157   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3158     return !AssumedLiveEdges.count(std::make_pair(From, To));
3159   }
3160 
3161   /// See AbstractAttribute::trackStatistics()
3162   void trackStatistics() const override {}
3163 
3164   /// Returns true if the function is assumed dead.
3165   bool isAssumedDead() const override { return false; }
3166 
3167   /// See AAIsDead::isKnownDead().
3168   bool isKnownDead() const override { return false; }
3169 
3170   /// See AAIsDead::isAssumedDead(BasicBlock *).
3171   bool isAssumedDead(const BasicBlock *BB) const override {
3172     assert(BB->getParent() == getAnchorScope() &&
3173            "BB must be in the same anchor scope function.");
3174 
3175     if (!getAssumed())
3176       return false;
3177     return !AssumedLiveBlocks.count(BB);
3178   }
3179 
3180   /// See AAIsDead::isKnownDead(BasicBlock *).
3181   bool isKnownDead(const BasicBlock *BB) const override {
3182     return getKnown() && isAssumedDead(BB);
3183   }
3184 
  /// See AAIsDead::isAssumedDead(Instruction *I).
3186   bool isAssumedDead(const Instruction *I) const override {
3187     assert(I->getParent()->getParent() == getAnchorScope() &&
3188            "Instruction must be in the same anchor scope function.");
3189 
3190     if (!getAssumed())
3191       return false;
3192 
    // If it is not in AssumedLiveBlocks then it is for sure dead. Otherwise,
    // it can still be after a noreturn call in a live block.
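    // For illustration: a call to a function deduced noreturn ends up in
    // KnownDeadEnds, so the scan over previous instructions below reports
    // every instruction after such a call as dead.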
3195     if (!AssumedLiveBlocks.count(I->getParent()))
3196       return true;
3197 
3198     // If it is not after a liveness barrier it is live.
3199     const Instruction *PrevI = I->getPrevNode();
3200     while (PrevI) {
3201       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3202         return true;
3203       PrevI = PrevI->getPrevNode();
3204     }
3205     return false;
3206   }
3207 
3208   /// See AAIsDead::isKnownDead(Instruction *I).
3209   bool isKnownDead(const Instruction *I) const override {
3210     return getKnown() && isAssumedDead(I);
3211   }
3212 
  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
3215   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3216     if (!AssumedLiveBlocks.insert(&BB).second)
3217       return false;
3218 
3219     // We assume that all of BB is (probably) live now and if there are calls to
3220     // internal functions we will assume that those are now live as well. This
3221     // is a performance optimization for blocks with calls to a lot of internal
3222     // functions. It can however cause dead functions to be treated as live.
3223     for (const Instruction &I : BB)
3224       if (const auto *CB = dyn_cast<CallBase>(&I))
3225         if (const Function *F = CB->getCalledFunction())
3226           if (F->hasLocalLinkage())
3227             A.markLiveInternalFunction(*F);
3228     return true;
3229   }
3230 
  /// Collection of instructions that need to be explored again, e.g., because
  /// we assumed they do not transfer control to (one of their) successors.
3233   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3234 
3235   /// Collection of instructions that are known to not transfer control.
3236   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3237 
  /// Collection of all assumed live edges.
3239   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3240 
3241   /// Collection of all assumed live BasicBlocks.
3242   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3243 };
3244 
3245 static bool
3246 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3247                         AbstractAttribute &AA,
3248                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3249   const IRPosition &IPos = IRPosition::callsite_function(CB);
3250 
3251   const auto &NoReturnAA =
3252       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3253   if (NoReturnAA.isAssumedNoReturn())
3254     return !NoReturnAA.isKnownNoReturn();
3255   if (CB.isTerminator())
3256     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3257   else
3258     AliveSuccessors.push_back(CB.getNextNode());
3259   return false;
3260 }
3261 
3262 static bool
3263 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3264                         AbstractAttribute &AA,
3265                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3266   bool UsedAssumedInformation =
3267       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3268 
3269   // First, determine if we can change an invoke to a call assuming the
3270   // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
3272   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3273     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3274   } else {
3275     const IRPosition &IPos = IRPosition::callsite_function(II);
3276     const auto &AANoUnw =
3277         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3278     if (AANoUnw.isAssumedNoUnwind()) {
3279       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3280     } else {
3281       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3282     }
3283   }
3284   return UsedAssumedInformation;
3285 }
3286 
3287 static bool
3288 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3289                         AbstractAttribute &AA,
3290                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3291   bool UsedAssumedInformation = false;
3292   if (BI.getNumSuccessors() == 1) {
3293     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3294   } else {
3295     Optional<Constant *> C =
3296         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3297     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3298       // No value yet, assume both edges are dead.
3299     } else if (isa_and_nonnull<ConstantInt>(*C)) {
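      // A conditional branch takes successor 0 when the condition is true,
      // so a constant condition C selects successor (1 - C).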
3300       const BasicBlock *SuccBB =
3301           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3302       AliveSuccessors.push_back(&SuccBB->front());
3303     } else {
3304       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3305       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3306       UsedAssumedInformation = false;
3307     }
3308   }
3309   return UsedAssumedInformation;
3310 }
3311 
3312 static bool
3313 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3314                         AbstractAttribute &AA,
3315                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3316   bool UsedAssumedInformation = false;
3317   Optional<Constant *> C =
3318       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3319   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3320     // No value yet, assume all edges are dead.
3321   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
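    // E.g., a condition known to be 2 makes only the successor of "case 2"
    // alive, or the default destination if no case matches.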
3322     for (auto &CaseIt : SI.cases()) {
3323       if (CaseIt.getCaseValue() == C.getValue()) {
3324         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3325         return UsedAssumedInformation;
3326       }
3327     }
3328     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3329     return UsedAssumedInformation;
3330   } else {
3331     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3332       AliveSuccessors.push_back(&SuccBB->front());
3333   }
3334   return UsedAssumedInformation;
3335 }
3336 
3337 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3338   ChangeStatus Change = ChangeStatus::UNCHANGED;
3339 
3340   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3341                     << getAnchorScope()->size() << "] BBs and "
3342                     << ToBeExploredFrom.size() << " exploration points and "
3343                     << KnownDeadEnds.size() << " known dead ends\n");
3344 
3345   // Copy and clear the list of instructions we need to explore from. It is
3346   // refilled with instructions the next update has to look at.
3347   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3348                                                ToBeExploredFrom.end());
3349   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3350 
3351   SmallVector<const Instruction *, 8> AliveSuccessors;
3352   while (!Worklist.empty()) {
3353     const Instruction *I = Worklist.pop_back_val();
3354     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3355 
    // Fast forward over uninteresting instructions. We could look for UB here
    // though.
3358     while (!I->isTerminator() && !isa<CallBase>(I)) {
3359       Change = ChangeStatus::CHANGED;
3360       I = I->getNextNode();
3361     }
3362 
3363     AliveSuccessors.clear();
3364 
3365     bool UsedAssumedInformation = false;
3366     switch (I->getOpcode()) {
3367     // TODO: look for (assumed) UB to backwards propagate "deadness".
3368     default:
3369       assert(I->isTerminator() &&
3370              "Expected non-terminators to be handled already!");
3371       for (const BasicBlock *SuccBB : successors(I->getParent()))
3372         AliveSuccessors.push_back(&SuccBB->front());
3373       break;
3374     case Instruction::Call:
3375       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3376                                                        *this, AliveSuccessors);
3377       break;
3378     case Instruction::Invoke:
3379       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3380                                                        *this, AliveSuccessors);
3381       break;
3382     case Instruction::Br:
3383       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3384                                                        *this, AliveSuccessors);
3385       break;
3386     case Instruction::Switch:
3387       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3388                                                        *this, AliveSuccessors);
3389       break;
3390     }
3391 
3392     if (UsedAssumedInformation) {
3393       NewToBeExploredFrom.insert(I);
3394     } else {
3395       Change = ChangeStatus::CHANGED;
3396       if (AliveSuccessors.empty() ||
3397           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3398         KnownDeadEnds.insert(I);
3399     }
3400 
3401     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3402                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3403                       << UsedAssumedInformation << "\n");
3404 
3405     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3406       if (!I->isTerminator()) {
3407         assert(AliveSuccessors.size() == 1 &&
3408                "Non-terminator expected to have a single successor!");
3409         Worklist.push_back(AliveSuccessor);
3410       } else {
        // Record the assumed live edge.
3412         AssumedLiveEdges.insert(
3413             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3414         if (assumeLive(A, *AliveSuccessor->getParent()))
3415           Worklist.push_back(AliveSuccessor);
3416       }
3417     }
3418   }
3419 
3420   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3421 
3422   // If we know everything is live there is no need to query for liveness.
3423   // Instead, indicating a pessimistic fixpoint will cause the state to be
3424   // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have ruled unreachable code dead, and (3) not have discovered any
  // non-trivial dead end.
3428   if (ToBeExploredFrom.empty() &&
3429       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3430       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3431         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3432       }))
3433     return indicatePessimisticFixpoint();
3434   return Change;
3435 }
3436 
/// Liveness information for a call site.
3438 struct AAIsDeadCallSite final : AAIsDeadFunction {
3439   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3440       : AAIsDeadFunction(IRP, A) {}
3441 
3442   /// See AbstractAttribute::initialize(...).
3443   void initialize(Attributor &A) override {
3444     // TODO: Once we have call site specific value information we can provide
3445     //       call site specific liveness information and then it makes
3446     //       sense to specialize attributes for call sites instead of
3447     //       redirecting requests to the callee.
3448     llvm_unreachable("Abstract attributes for liveness are not "
3449                      "supported for call sites yet!");
3450   }
3451 
3452   /// See AbstractAttribute::updateImpl(...).
3453   ChangeStatus updateImpl(Attributor &A) override {
3454     return indicatePessimisticFixpoint();
3455   }
3456 
3457   /// See AbstractAttribute::trackStatistics()
3458   void trackStatistics() const override {}
3459 };
3460 
3461 /// -------------------- Dereferenceable Argument Attribute --------------------
3462 
3463 template <>
3464 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3465                                                      const DerefState &R) {
3466   ChangeStatus CS0 =
3467       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3468   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3469   return CS0 | CS1;
3470 }
3471 
3472 struct AADereferenceableImpl : AADereferenceable {
3473   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3474       : AADereferenceable(IRP, A) {}
3475   using StateType = DerefState;
3476 
3477   /// See AbstractAttribute::initialize(...).
3478   void initialize(Attributor &A) override {
3479     SmallVector<Attribute, 4> Attrs;
3480     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3481              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3482     for (const Attribute &Attr : Attrs)
3483       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3484 
3485     const IRPosition &IRP = this->getIRPosition();
3486     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3487 
3488     bool CanBeNull, CanBeFreed;
3489     takeKnownDerefBytesMaximum(
3490         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3491             A.getDataLayout(), CanBeNull, CanBeFreed));
3492 
3493     bool IsFnInterface = IRP.isFnInterfaceKind();
3494     Function *FnScope = IRP.getAnchorScope();
3495     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3496       indicatePessimisticFixpoint();
3497       return;
3498     }
3499 
3500     if (Instruction *CtxI = getCtxI())
3501       followUsesInMBEC(*this, A, getState(), *CtxI);
3502   }
3503 
3504   /// See AbstractAttribute::getState()
3505   /// {
3506   StateType &getState() override { return *this; }
3507   const StateType &getState() const override { return *this; }
3508   /// }
3509 
3510   /// Helper function for collecting accessed bytes in must-be-executed-context
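  /// E.g., a store of an i32 through a pointer at constant offset 8 from the
  /// associated value records the accessed byte range [8, 12).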
3511   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3512                               DerefState &State) {
3513     const Value *UseV = U->get();
3514     if (!UseV->getType()->isPointerTy())
3515       return;
3516 
3517     Type *PtrTy = UseV->getType();
3518     const DataLayout &DL = A.getDataLayout();
3519     int64_t Offset;
3520     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3521             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3522       if (Base == &getAssociatedValue() &&
3523           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3524         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3525         State.addAccessedBytes(Offset, Size);
3526       }
3527     }
3528   }
3529 
3530   /// See followUsesInMBEC
3531   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3532                        AADereferenceable::StateType &State) {
3533     bool IsNonNull = false;
3534     bool TrackUse = false;
3535     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3536         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3537     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3538                       << " for instruction " << *I << "\n");
3539 
3540     addAccessedBytesForUse(A, U, I, State);
3541     State.takeKnownDerefBytesMaximum(DerefBytes);
3542     return TrackUse;
3543   }
3544 
3545   /// See AbstractAttribute::manifest(...).
3546   ChangeStatus manifest(Attributor &A) override {
3547     ChangeStatus Change = AADereferenceable::manifest(A);
3548     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3549       removeAttrs({Attribute::DereferenceableOrNull});
3550       return ChangeStatus::CHANGED;
3551     }
3552     return Change;
3553   }
3554 
3555   void getDeducedAttributes(LLVMContext &Ctx,
3556                             SmallVectorImpl<Attribute> &Attrs) const override {
3557     // TODO: Add *_globally support
3558     if (isAssumedNonNull())
3559       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3560           Ctx, getAssumedDereferenceableBytes()));
3561     else
3562       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3563           Ctx, getAssumedDereferenceableBytes()));
3564   }
3565 
3566   /// See AbstractAttribute::getAsStr().
3567   const std::string getAsStr() const override {
3568     if (!getAssumedDereferenceableBytes())
3569       return "unknown-dereferenceable";
3570     return std::string("dereferenceable") +
3571            (isAssumedNonNull() ? "" : "_or_null") +
3572            (isAssumedGlobal() ? "_globally" : "") + "<" +
3573            std::to_string(getKnownDereferenceableBytes()) + "-" +
3574            std::to_string(getAssumedDereferenceableBytes()) + ">";
3575   }
3576 };
3577 
3578 /// Dereferenceable attribute for a floating value.
3579 struct AADereferenceableFloating : AADereferenceableImpl {
3580   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3581       : AADereferenceableImpl(IRP, A) {}
3582 
3583   /// See AbstractAttribute::updateImpl(...).
3584   ChangeStatus updateImpl(Attributor &A) override {
3585     const DataLayout &DL = A.getDataLayout();
3586 
3587     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3588                             bool Stripped) -> bool {
3589       unsigned IdxWidth =
3590           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3591       APInt Offset(IdxWidth, 0);
3592       const Value *Base =
3593           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3594 
3595       const auto &AA = A.getAAFor<AADereferenceable>(
3596           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3597       int64_t DerefBytes = 0;
3598       if (!Stripped && this == &AA) {
3599         // Use IR information if we did not strip anything.
3600         // TODO: track globally.
3601         bool CanBeNull, CanBeFreed;
3602         DerefBytes =
3603             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
3604         T.GlobalState.indicatePessimisticFixpoint();
3605       } else {
3606         const DerefState &DS = AA.getState();
3607         DerefBytes = DS.DerefBytesState.getAssumed();
3608         T.GlobalState &= DS.GlobalState;
3609       }
3610 
      // For now we do not try to "increase" dereferenceability due to
      // negative indices as we first have to come up with code to deal with
      // loops and with overflows of the dereferenceable bytes.
3614       int64_t OffsetSExt = Offset.getSExtValue();
3615       if (OffsetSExt < 0)
3616         OffsetSExt = 0;
3617 
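      // E.g., a base assumed dereferenceable for 16 bytes accessed at a
      // constant positive offset of 4 leaves at least 12 dereferenceable
      // bytes (DerefBytes - OffsetSExt) for the derived pointer.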
3618       T.takeAssumedDerefBytesMinimum(
3619           std::max(int64_t(0), DerefBytes - OffsetSExt));
3620 
3621       if (this == &AA) {
3622         if (!Stripped) {
3623           // If nothing was stripped IR information is all we got.
3624           T.takeKnownDerefBytesMaximum(
3625               std::max(int64_t(0), DerefBytes - OffsetSExt));
3626           T.indicatePessimisticFixpoint();
3627         } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning, we
          // look at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
3633           T.indicatePessimisticFixpoint();
3634         }
3635       }
3636 
3637       return T.isValidState();
3638     };
3639 
3640     DerefState T;
3641     if (!genericValueTraversal<AADereferenceable, DerefState>(
3642             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3643       return indicatePessimisticFixpoint();
3644 
3645     return clampStateAndIndicateChange(getState(), T);
3646   }
3647 
3648   /// See AbstractAttribute::trackStatistics()
3649   void trackStatistics() const override {
3650     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3651   }
3652 };
3653 
3654 /// Dereferenceable attribute for a return value.
3655 struct AADereferenceableReturned final
3656     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3657   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3658       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3659             IRP, A) {}
3660 
3661   /// See AbstractAttribute::trackStatistics()
3662   void trackStatistics() const override {
3663     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3664   }
3665 };
3666 
3667 /// Dereferenceable attribute for an argument
3668 struct AADereferenceableArgument final
3669     : AAArgumentFromCallSiteArguments<AADereferenceable,
3670                                       AADereferenceableImpl> {
3671   using Base =
3672       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3673   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3674       : Base(IRP, A) {}
3675 
3676   /// See AbstractAttribute::trackStatistics()
3677   void trackStatistics() const override {
3678     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3679   }
3680 };
3681 
3682 /// Dereferenceable attribute for a call site argument.
3683 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3684   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3685       : AADereferenceableFloating(IRP, A) {}
3686 
3687   /// See AbstractAttribute::trackStatistics()
3688   void trackStatistics() const override {
3689     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3690   }
3691 };
3692 
3693 /// Dereferenceable attribute deduction for a call site return value.
3694 struct AADereferenceableCallSiteReturned final
3695     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3696   using Base =
3697       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3698   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3699       : Base(IRP, A) {}
3700 
3701   /// See AbstractAttribute::trackStatistics()
3702   void trackStatistics() const override {
3703     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3704   }
3705 };
3706 
3707 // ------------------------ Align Argument Attribute ------------------------
3708 
3709 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3710                                     Value &AssociatedValue, const Use *U,
3711                                     const Instruction *I, bool &TrackUse) {
3712   // We need to follow common pointer manipulation uses to the accesses they
3713   // feed into.
3714   if (isa<CastInst>(I)) {
3715     // Follow all but ptr2int casts.
3716     TrackUse = !isa<PtrToIntInst>(I);
3717     return 0;
3718   }
3719   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3720     if (GEP->hasAllConstantIndices())
3721       TrackUse = true;
3722     return 0;
3723   }
3724 
3725   MaybeAlign MA;
3726   if (const auto *CB = dyn_cast<CallBase>(I)) {
3727     if (CB->isBundleOperand(U) || CB->isCallee(U))
3728       return 0;
3729 
3730     unsigned ArgNo = CB->getArgOperandNo(U);
3731     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3732     // As long as we only use known information there is no need to track
3733     // dependences here.
3734     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3735     MA = MaybeAlign(AlignAA.getKnownAlign());
3736   }
3737 
3738   const DataLayout &DL = A.getDataLayout();
3739   const Value *UseV = U->get();
3740   if (auto *SI = dyn_cast<StoreInst>(I)) {
3741     if (SI->getPointerOperand() == UseV)
3742       MA = SI->getAlign();
3743   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3744     if (LI->getPointerOperand() == UseV)
3745       MA = LI->getAlign();
3746   }
3747 
3748   if (!MA || *MA <= QueryingAA.getKnownAlign())
3749     return 0;
3750 
3751   unsigned Alignment = MA->value();
3752   int64_t Offset;
3753 
3754   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3755     if (Base == &AssociatedValue) {
3756       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3757       // So we can say that the maximum power of two which is a divisor of
3758       // gcd(Offset, Alignment) is an alignment.
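      // Worked example (illustrative): Offset = 20 and Alignment = 8 give
      // gcd(20, 8) = 4, whose largest power-of-two divisor is 4, so an
      // alignment of 4 can be claimed.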
3759 
3760       uint32_t gcd =
3761           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3762       Alignment = llvm::PowerOf2Floor(gcd);
3763     }
3764   }
3765 
3766   return Alignment;
3767 }
3768 
3769 struct AAAlignImpl : AAAlign {
3770   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3771 
3772   /// See AbstractAttribute::initialize(...).
3773   void initialize(Attributor &A) override {
3774     SmallVector<Attribute, 4> Attrs;
3775     getAttrs({Attribute::Alignment}, Attrs);
3776     for (const Attribute &Attr : Attrs)
3777       takeKnownMaximum(Attr.getValueAsInt());
3778 
3779     Value &V = getAssociatedValue();
    // TODO: This is a HACK to avoid getPointerAlignment introducing a ptr2int
    //       use of the function pointer. This was caused by D73131. We want to
3782     //       avoid this for function pointers especially because we iterate
3783     //       their uses and int2ptr is not handled. It is not a correctness
3784     //       problem though!
3785     if (!V.getType()->getPointerElementType()->isFunctionTy())
3786       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3787 
3788     if (getIRPosition().isFnInterfaceKind() &&
3789         (!getAnchorScope() ||
3790          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3791       indicatePessimisticFixpoint();
3792       return;
3793     }
3794 
3795     if (Instruction *CtxI = getCtxI())
3796       followUsesInMBEC(*this, A, getState(), *CtxI);
3797   }
3798 
3799   /// See AbstractAttribute::manifest(...).
3800   ChangeStatus manifest(Attributor &A) override {
3801     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3802 
3803     // Check for users that allow alignment annotations.
3804     Value &AssociatedValue = getAssociatedValue();
3805     for (const Use &U : AssociatedValue.uses()) {
3806       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3807         if (SI->getPointerOperand() == &AssociatedValue)
3808           if (SI->getAlignment() < getAssumedAlign()) {
3809             STATS_DECLTRACK(AAAlign, Store,
3810                             "Number of times alignment added to a store");
3811             SI->setAlignment(Align(getAssumedAlign()));
3812             LoadStoreChanged = ChangeStatus::CHANGED;
3813           }
3814       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3815         if (LI->getPointerOperand() == &AssociatedValue)
3816           if (LI->getAlignment() < getAssumedAlign()) {
3817             LI->setAlignment(Align(getAssumedAlign()));
3818             STATS_DECLTRACK(AAAlign, Load,
3819                             "Number of times alignment added to a load");
3820             LoadStoreChanged = ChangeStatus::CHANGED;
3821           }
3822       }
3823     }
3824 
3825     ChangeStatus Changed = AAAlign::manifest(A);
3826 
3827     Align InheritAlign =
3828         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3829     if (InheritAlign >= getAssumedAlign())
3830       return LoadStoreChanged;
3831     return Changed | LoadStoreChanged;
3832   }
3833 
  // TODO: Provide a helper to determine the implied ABI alignment and check
  //       that value in the existing manifest method and a new one for
  //       AAAlignImpl to avoid making the alignment explicit if it did not
  //       improve.
3837 
3838   /// See AbstractAttribute::getDeducedAttributes
3839   virtual void
3840   getDeducedAttributes(LLVMContext &Ctx,
3841                        SmallVectorImpl<Attribute> &Attrs) const override {
3842     if (getAssumedAlign() > 1)
3843       Attrs.emplace_back(
3844           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3845   }
3846 
3847   /// See followUsesInMBEC
3848   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3849                        AAAlign::StateType &State) {
3850     bool TrackUse = false;
3851 
3852     unsigned int KnownAlign =
3853         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3854     State.takeKnownMaximum(KnownAlign);
3855 
3856     return TrackUse;
3857   }
3858 
3859   /// See AbstractAttribute::getAsStr().
3860   const std::string getAsStr() const override {
3861     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3862                                 "-" + std::to_string(getAssumedAlign()) + ">")
3863                              : "unknown-align";
3864   }
3865 };
3866 
3867 /// Align attribute for a floating value.
3868 struct AAAlignFloating : AAAlignImpl {
3869   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3870 
3871   /// See AbstractAttribute::updateImpl(...).
3872   ChangeStatus updateImpl(Attributor &A) override {
3873     const DataLayout &DL = A.getDataLayout();
3874 
3875     auto VisitValueCB = [&](Value &V, const Instruction *,
3876                             AAAlign::StateType &T, bool Stripped) -> bool {
3877       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3878                                            DepClassTy::REQUIRED);
3879       if (!Stripped && this == &AA) {
3880         int64_t Offset;
3881         unsigned Alignment = 1;
3882         if (const Value *Base =
3883                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3884           Align PA = Base->getPointerAlignment(DL);
3885           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3886           // So we can say that the maximum power of two which is a divisor of
3887           // gcd(Offset, Alignment) is an alignment.
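          // E.g., a base aligned to 16 accessed at constant offset 12 yields
          // gcd(12, 16) = 4, so the access is known 4-byte aligned.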
3888 
3889           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3890                                                uint32_t(PA.value()));
3891           Alignment = llvm::PowerOf2Floor(gcd);
3892         } else {
3893           Alignment = V.getPointerAlignment(DL).value();
3894         }
3895         // Use only IR information if we did not strip anything.
3896         T.takeKnownMaximum(Alignment);
3897         T.indicatePessimisticFixpoint();
3898       } else {
3899         // Use abstract attribute information.
3900         const AAAlign::StateType &DS = AA.getState();
3901         T ^= DS;
3902       }
3903       return T.isValidState();
3904     };
3905 
3906     StateType T;
3907     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3908                                                    VisitValueCB, getCtxI()))
3909       return indicatePessimisticFixpoint();
3910 
    // TODO: If we know we visited all incoming values, and thus none are
    // assumed dead, we can take the known information from the state T.
3913     return clampStateAndIndicateChange(getState(), T);
3914   }
3915 
3916   /// See AbstractAttribute::trackStatistics()
3917   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3918 };
3919 
3920 /// Align attribute for function return value.
3921 struct AAAlignReturned final
3922     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3923   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3924   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3925 
3926   /// See AbstractAttribute::initialize(...).
3927   void initialize(Attributor &A) override {
3928     Base::initialize(A);
3929     Function *F = getAssociatedFunction();
3930     if (!F || F->isDeclaration())
3931       indicatePessimisticFixpoint();
3932   }
3933 
3934   /// See AbstractAttribute::trackStatistics()
3935   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3936 };
3937 
3938 /// Align attribute for function argument.
3939 struct AAAlignArgument final
3940     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3941   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3942   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3943 
3944   /// See AbstractAttribute::manifest(...).
3945   ChangeStatus manifest(Attributor &A) override {
3946     // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in sync. It just does not seem worth the trouble right now.
3949     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3950       return ChangeStatus::UNCHANGED;
3951     return Base::manifest(A);
3952   }
3953 
3954   /// See AbstractAttribute::trackStatistics()
3955   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3956 };
3957 
3958 struct AAAlignCallSiteArgument final : AAAlignFloating {
3959   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3960       : AAAlignFloating(IRP, A) {}
3961 
3962   /// See AbstractAttribute::manifest(...).
3963   ChangeStatus manifest(Attributor &A) override {
3964     // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in sync. It just does not seem worth the trouble right now.
3967     if (Argument *Arg = getAssociatedArgument())
3968       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3969         return ChangeStatus::UNCHANGED;
3970     ChangeStatus Changed = AAAlignImpl::manifest(A);
3971     Align InheritAlign =
3972         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3973     if (InheritAlign >= getAssumedAlign())
3974       Changed = ChangeStatus::UNCHANGED;
3975     return Changed;
3976   }
3977 
3978   /// See AbstractAttribute::updateImpl(Attributor &A).
3979   ChangeStatus updateImpl(Attributor &A) override {
3980     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3981     if (Argument *Arg = getAssociatedArgument()) {
3982       // We only take known information from the argument
3983       // so we do not need to track a dependence.
3984       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3985           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
3986       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3987     }
3988     return Changed;
3989   }
3990 
3991   /// See AbstractAttribute::trackStatistics()
3992   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3993 };
3994 
3995 /// Align attribute deduction for a call site return value.
3996 struct AAAlignCallSiteReturned final
3997     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3998   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3999   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4000       : Base(IRP, A) {}
4001 
4002   /// See AbstractAttribute::initialize(...).
4003   void initialize(Attributor &A) override {
4004     Base::initialize(A);
4005     Function *F = getAssociatedFunction();
4006     if (!F || F->isDeclaration())
4007       indicatePessimisticFixpoint();
4008   }
4009 
4010   /// See AbstractAttribute::trackStatistics()
4011   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4012 };
4013 
4014 /// ------------------ Function No-Return Attribute ----------------------------
4015 struct AANoReturnImpl : public AANoReturn {
4016   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4017 
4018   /// See AbstractAttribute::initialize(...).
4019   void initialize(Attributor &A) override {
4020     AANoReturn::initialize(A);
4021     Function *F = getAssociatedFunction();
4022     if (!F || F->isDeclaration())
4023       indicatePessimisticFixpoint();
4024   }
4025 
4026   /// See AbstractAttribute::getAsStr().
4027   const std::string getAsStr() const override {
4028     return getAssumed() ? "noreturn" : "may-return";
4029   }
4030 
4031   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
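    // A function is "noreturn" if no return instruction is reachable. The
    // callback below rejects every return instruction it is asked about, so
    // checkForAllInstructions only succeeds if all returns are assumed dead.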
4033     auto CheckForNoReturn = [](Instruction &) { return false; };
4034     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4035                                    {(unsigned)Instruction::Ret}))
4036       return indicatePessimisticFixpoint();
4037     return ChangeStatus::UNCHANGED;
4038   }
4039 };
4040 
4041 struct AANoReturnFunction final : AANoReturnImpl {
4042   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4043       : AANoReturnImpl(IRP, A) {}
4044 
4045   /// See AbstractAttribute::trackStatistics()
4046   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4047 };
4048 
/// NoReturn attribute deduction for a call site.
4050 struct AANoReturnCallSite final : AANoReturnImpl {
4051   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4052       : AANoReturnImpl(IRP, A) {}
4053 
4054   /// See AbstractAttribute::initialize(...).
4055   void initialize(Attributor &A) override {
4056     AANoReturnImpl::initialize(A);
4057     if (Function *F = getAssociatedFunction()) {
4058       const IRPosition &FnPos = IRPosition::function(*F);
4059       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4060       if (!FnAA.isAssumedNoReturn())
4061         indicatePessimisticFixpoint();
4062     }
4063   }
4064 
4065   /// See AbstractAttribute::updateImpl(...).
4066   ChangeStatus updateImpl(Attributor &A) override {
4067     // TODO: Once we have call site specific value information we can provide
4068     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
4070     //       redirecting requests to the callee argument.
4071     Function *F = getAssociatedFunction();
4072     const IRPosition &FnPos = IRPosition::function(*F);
4073     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4074     return clampStateAndIndicateChange(getState(), FnAA.getState());
4075   }
4076 
4077   /// See AbstractAttribute::trackStatistics()
4078   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4079 };
4080 
4081 /// ----------------------- Variable Capturing ---------------------------------
4082 
/// A class to hold the state for no-capture attributes.
4084 struct AANoCaptureImpl : public AANoCapture {
4085   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4086 
4087   /// See AbstractAttribute::initialize(...).
4088   void initialize(Attributor &A) override {
4089     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4090       indicateOptimisticFixpoint();
4091       return;
4092     }
4093     Function *AnchorScope = getAnchorScope();
4094     if (isFnInterfaceKind() &&
4095         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4096       indicatePessimisticFixpoint();
4097       return;
4098     }
4099 
4100     // You cannot "capture" null in the default address space.
4101     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4102         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4103       indicateOptimisticFixpoint();
4104       return;
4105     }
4106 
4107     const Function *F =
4108         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4109 
4110     // Check what state the associated function can actually capture.
4111     if (F)
4112       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4113     else
4114       indicatePessimisticFixpoint();
4115   }
4116 
4117   /// See AbstractAttribute::updateImpl(...).
4118   ChangeStatus updateImpl(Attributor &A) override;
4119 
  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
4124     if (!isAssumedNoCaptureMaybeReturned())
4125       return;
4126 
4127     if (isArgumentPosition()) {
4128       if (isAssumedNoCapture())
4129         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4130       else if (ManifestInternal)
4131         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4132     }
4133   }
4134 
4135   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4136   /// depending on the ability of the function associated with \p IRP to capture
4137   /// state in memory and through "returning/throwing", respectively.
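  /// For example (an illustrative sketch): given
  ///   declare void @f(i8* %p) readonly nounwind
  /// the function can neither store the pointer to memory nor communicate it
  /// back via a return value or an exception, so NO_CAPTURE becomes known.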
4138   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4139                                                    const Function &F,
4140                                                    BitIntegerState &State) {
4141     // TODO: Once we have memory behavior attributes we should use them here.
4142 
4143     // If we know we cannot communicate or write to memory, we do not care about
4144     // ptr2int anymore.
4145     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4146         F.getReturnType()->isVoidTy()) {
4147       State.addKnownBits(NO_CAPTURE);
4148       return;
4149     }
4150 
    // A function cannot capture state in memory if it only reads memory; it
    // can, however, return/throw state and the state might be influenced by
    // the pointer value, e.g., loading from a returned pointer might reveal a
    // bit.
4154     if (F.onlyReadsMemory())
4155       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4156 
    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
4159     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4160       State.addKnownBits(NOT_CAPTURED_IN_RET);
4161 
4162     // Check existing "returned" attributes.
4163     int ArgNo = IRP.getCalleeArgNo();
4164     if (F.doesNotThrow() && ArgNo >= 0) {
4165       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4166         if (F.hasParamAttribute(u, Attribute::Returned)) {
4167           if (u == unsigned(ArgNo))
4168             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4169           else if (F.onlyReadsMemory())
4170             State.addKnownBits(NO_CAPTURE);
4171           else
4172             State.addKnownBits(NOT_CAPTURED_IN_RET);
4173           break;
4174         }
4175     }
4176   }
4177 
  /// See AbstractAttribute::getAsStr().
4179   const std::string getAsStr() const override {
4180     if (isKnownNoCapture())
4181       return "known not-captured";
4182     if (isAssumedNoCapture())
4183       return "assumed not-captured";
4184     if (isKnownNoCaptureMaybeReturned())
4185       return "known not-captured-maybe-returned";
4186     if (isAssumedNoCaptureMaybeReturned())
4187       return "assumed not-captured-maybe-returned";
4188     return "assumed-captured";
4189   }
4190 };
4191 
4192 /// Attributor-aware capture tracker.
4193 struct AACaptureUseTracker final : public CaptureTracker {
4194 
  /// Create a capture tracker that can look up in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, the corresponding
  /// NOT_CAPTURED_IN_* bits are removed from \p State and the search is
  /// stopped. If a use leads to a return instruction, only the
  /// AANoCapture::NOT_CAPTURED_IN_RET bit is removed. If a use leads to a
  /// ptr2int which may capture the value, the state is pessimized. If a use
  /// is found that is currently assumed "no-capture-maybe-returned", the user
  /// is added to the \p PotentialCopies set. All values in \p PotentialCopies
  /// are later tracked as well. For every explored use we decrement
  /// \p RemainingUsesToExplore. Once it reaches 0, the search is stopped and
  /// the state is conservatively set to "captured".
4208   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4209                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4210                       SmallVectorImpl<const Value *> &PotentialCopies,
4211                       unsigned &RemainingUsesToExplore)
4212       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4213         PotentialCopies(PotentialCopies),
4214         RemainingUsesToExplore(RemainingUsesToExplore) {}
4215 
  /// Determine if \p V may be captured. *Also updates the state!*
4217   bool valueMayBeCaptured(const Value *V) {
4218     if (V->getType()->isPointerTy()) {
4219       PointerMayBeCaptured(V, this);
4220     } else {
4221       State.indicatePessimisticFixpoint();
4222     }
4223     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4224   }
4225 
4226   /// See CaptureTracker::tooManyUses().
4227   void tooManyUses() override {
4228     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4229   }
4230 
4231   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4232     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4233       return true;
4234     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4235         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4236     return DerefAA.getAssumedDereferenceableBytes();
4237   }
4238 
4239   /// See CaptureTracker::captured(...).
4240   bool captured(const Use *U) override {
4241     Instruction *UInst = cast<Instruction>(U->getUser());
4242     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4243                       << "\n");
4244 
4245     // Because we may reuse the tracker multiple times we keep track of the
4246     // number of explored uses ourselves as well.
4247     if (RemainingUsesToExplore-- == 0) {
4248       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4249       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4250                           /* Return */ true);
4251     }
4252 
4253     // Deal with ptr2int by following uses.
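    // Note that valueMayBeCaptured only follows pointer-typed values; for the
    // non-pointer result of the ptr2int it pessimizes the state right away,
    // hence "assume the worst" below.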
4254     if (isa<PtrToIntInst>(UInst)) {
4255       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4256       return valueMayBeCaptured(UInst);
4257     }
4258 
4259     // Explicitly catch return instructions.
4260     if (isa<ReturnInst>(UInst))
4261       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4262                           /* Return */ true);
4263 
4264     // For now we only use special logic for call sites. However, the tracker
4265     // itself knows about a lot of other non-capturing cases already.
4266     auto *CB = dyn_cast<CallBase>(UInst);
4267     if (!CB || !CB->isArgOperand(U))
4268       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4269                           /* Return */ true);
4270 
4271     unsigned ArgNo = CB->getArgOperandNo(U);
4272     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
4274     // it to justify a non-capture attribute here. This allows recursion!
4275     auto &ArgNoCaptureAA =
4276         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4277     if (ArgNoCaptureAA.isAssumedNoCapture())
4278       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4279                           /* Return */ false);
4280     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4281       addPotentialCopy(*CB);
4282       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4283                           /* Return */ false);
4284     }
4285 
    // Lastly, we could not find a reason to assume no-capture, so we don't.
4287     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4288                         /* Return */ true);
4289   }
4290 
  /// Register \p CB as a potential copy of the value we are checking.
4292   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4293 
4294   /// See CaptureTracker::shouldExplore(...).
4295   bool shouldExplore(const Use *U) override {
4296     // Check liveness and ignore droppable users.
4297     return !U->getUser()->isDroppable() &&
4298            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4299   }
4300 
4301   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4302   /// \p CapturedInRet, then return the appropriate value for use in the
4303   /// CaptureTracker::captured() interface.
4304   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4305                     bool CapturedInRet) {
4306     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4307                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4308     if (CapturedInMem)
4309       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4310     if (CapturedInInt)
4311       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4312     if (CapturedInRet)
4313       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4314     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4315   }
4316 
4317 private:
4318   /// The attributor providing in-flight abstract attributes.
4319   Attributor &A;
4320 
4321   /// The abstract attribute currently updated.
4322   AANoCapture &NoCaptureAA;
4323 
4324   /// The abstract liveness state.
4325   const AAIsDead &IsDeadAA;
4326 
4327   /// The state currently updated.
4328   AANoCapture::StateType &State;
4329 
4330   /// Set of potential copies of the tracked value.
4331   SmallVectorImpl<const Value *> &PotentialCopies;
4332 
4333   /// Global counter to limit the number of explored uses.
4334   unsigned &RemainingUsesToExplore;
4335 };
4336 
4337 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4338   const IRPosition &IRP = getIRPosition();
4339   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4340                                         : &IRP.getAssociatedValue();
4341   if (!V)
4342     return indicatePessimisticFixpoint();
4343 
4344   const Function *F =
4345       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4346   assert(F && "Expected a function!");
4347   const IRPosition &FnPos = IRPosition::function(*F);
4348   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4349 
4350   AANoCapture::StateType T;
4351 
4352   // Readonly means we cannot capture through memory.
4353   const auto &FnMemAA =
4354       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4355   if (FnMemAA.isAssumedReadOnly()) {
4356     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4357     if (FnMemAA.isKnownReadOnly())
4358       addKnownBits(NOT_CAPTURED_IN_MEM);
4359     else
4360       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4361   }
4362 
  // Make sure all returned values are different from the underlying value.
4364   // TODO: we could do this in a more sophisticated way inside
4365   //       AAReturnedValues, e.g., track all values that escape through returns
4366   //       directly somehow.
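  // A returned value only matters here if it may alias the tracked value: a
  // single returned constant or arguments other than the associated one
  // cannot, while the associated argument itself or any other returned value
  // might.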
4367   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4368     bool SeenConstant = false;
4369     for (auto &It : RVAA.returned_values()) {
4370       if (isa<Constant>(It.first)) {
4371         if (SeenConstant)
4372           return false;
4373         SeenConstant = true;
4374       } else if (!isa<Argument>(It.first) ||
4375                  It.first == getAssociatedArgument())
4376         return false;
4377     }
4378     return true;
4379   };
4380 
4381   const auto &NoUnwindAA =
4382       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4383   if (NoUnwindAA.isAssumedNoUnwind()) {
4384     bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 DepClassTy::OPTIONAL);
4390     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4391       T.addKnownBits(NOT_CAPTURED_IN_RET);
4392       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4393         return ChangeStatus::UNCHANGED;
4394       if (NoUnwindAA.isKnownNoUnwind() &&
4395           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4396         addKnownBits(NOT_CAPTURED_IN_RET);
4397         if (isKnown(NOT_CAPTURED_IN_MEM))
4398           return indicateOptimisticFixpoint();
4399       }
4400     }
4401   }
4402 
  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly update the assumed state.
4406   SmallVector<const Value *, 4> PotentialCopies;
4407   unsigned RemainingUsesToExplore =
4408       getDefaultMaxUsesToExploreForCaptureTracking();
4409   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4410                               RemainingUsesToExplore);
4411 
4412   // Check all potential copies of the associated value until we can assume
4413   // none will be captured or we have to assume at least one might be.
4414   unsigned Idx = 0;
4415   PotentialCopies.push_back(V);
4416   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4417     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4418 
4419   AANoCapture::StateType &S = getState();
4420   auto Assumed = S.getAssumed();
4421   S.intersectAssumedBits(T.getAssumed());
4422   if (!isAssumedNoCaptureMaybeReturned())
4423     return indicatePessimisticFixpoint();
4424   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4425                                    : ChangeStatus::CHANGED;
4426 }
4427 
4428 /// NoCapture attribute for function arguments.
4429 struct AANoCaptureArgument final : AANoCaptureImpl {
4430   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4431       : AANoCaptureImpl(IRP, A) {}
4432 
4433   /// See AbstractAttribute::trackStatistics()
4434   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4435 };
4436 
4437 /// NoCapture attribute for call site arguments.
4438 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4439   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4440       : AANoCaptureImpl(IRP, A) {}
4441 
4442   /// See AbstractAttribute::initialize(...).
4443   void initialize(Attributor &A) override {
4444     if (Argument *Arg = getAssociatedArgument())
4445       if (Arg->hasByValAttr())
4446         indicateOptimisticFixpoint();
4447     AANoCaptureImpl::initialize(A);
4448   }
4449 
4450   /// See AbstractAttribute::updateImpl(...).
4451   ChangeStatus updateImpl(Attributor &A) override {
4452     // TODO: Once we have call site specific value information we can provide
4453     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
4455     //       redirecting requests to the callee argument.
4456     Argument *Arg = getAssociatedArgument();
4457     if (!Arg)
4458       return indicatePessimisticFixpoint();
4459     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4460     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4461     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4462   }
4463 
4464   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) }
4466 };
4467 
4468 /// NoCapture attribute for floating values.
4469 struct AANoCaptureFloating final : AANoCaptureImpl {
4470   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4471       : AANoCaptureImpl(IRP, A) {}
4472 
4473   /// See AbstractAttribute::trackStatistics()
4474   void trackStatistics() const override {
4475     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4476   }
4477 };
4478 
4479 /// NoCapture attribute for function return value.
4480 struct AANoCaptureReturned final : AANoCaptureImpl {
4481   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4482       : AANoCaptureImpl(IRP, A) {
4483     llvm_unreachable("NoCapture is not applicable to function returns!");
4484   }
4485 
4486   /// See AbstractAttribute::initialize(...).
4487   void initialize(Attributor &A) override {
4488     llvm_unreachable("NoCapture is not applicable to function returns!");
4489   }
4490 
4491   /// See AbstractAttribute::updateImpl(...).
4492   ChangeStatus updateImpl(Attributor &A) override {
4493     llvm_unreachable("NoCapture is not applicable to function returns!");
4494   }
4495 
4496   /// See AbstractAttribute::trackStatistics()
4497   void trackStatistics() const override {}
4498 };
4499 
4500 /// NoCapture attribute deduction for a call site return value.
4501 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4502   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4503       : AANoCaptureImpl(IRP, A) {}
4504 
4505   /// See AbstractAttribute::initialize(...).
4506   void initialize(Attributor &A) override {
4507     const Function *F = getAnchorScope();
4508     // Check what state the associated function can actually capture.
4509     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4510   }
4511 
4512   /// See AbstractAttribute::trackStatistics()
4513   void trackStatistics() const override {
4514     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4515   }
4516 };
4517 
4518 /// ------------------ Value Simplify Attribute ----------------------------
4519 struct AAValueSimplifyImpl : AAValueSimplify {
4520   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4521       : AAValueSimplify(IRP, A) {}
4522 
4523   /// See AbstractAttribute::initialize(...).
4524   void initialize(Attributor &A) override {
4525     if (getAssociatedValue().getType()->isVoidTy())
4526       indicatePessimisticFixpoint();
4527   }
4528 
4529   /// See AbstractAttribute::getAsStr().
4530   const std::string getAsStr() const override {
4531     LLVM_DEBUG({
4532       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
4533       if (SimplifiedAssociatedValue)
4534         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
4535     });
4536     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4537                         : "not-simple";
4538   }
4539 
4540   /// See AbstractAttribute::trackStatistics()
4541   void trackStatistics() const override {}
4542 
4543   /// See AAValueSimplify::getAssumedSimplifiedValue()
4544   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4545     if (!getAssumed())
4546       return const_cast<Value *>(&getAssociatedValue());
4547     return SimplifiedAssociatedValue;
4548   }
4549 
  /// Helper function for querying AAValueSimplify and updating the candidate.
4551   /// \param QueryingValue Value trying to unify with SimplifiedValue
4552   /// \param AccumulatedSimplifiedValue Current simplification result.
4553   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4554                              Value &QueryingValue,
4555                              Optional<Value *> &AccumulatedSimplifiedValue) {
    // FIXME: Add typecast support.
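    // The simplified value is a tri-state: None means "not determined yet"
    // (optimistically ignored below), nullptr means "cannot be simplified",
    // and a concrete value means "assumed simplified to that value". Undef
    // unifies with any other simplified value.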
4557 
4558     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4559         QueryingAA,
4560         IRPosition::value(QueryingValue, QueryingAA.getCallBaseContext()),
4561         DepClassTy::REQUIRED);
4562 
4563     Optional<Value *> QueryingValueSimplified =
4564         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4565 
4566     if (!QueryingValueSimplified.hasValue())
4567       return true;
4568 
4569     if (!QueryingValueSimplified.getValue())
4570       return false;
4571 
4572     Value &QueryingValueSimplifiedUnwrapped =
4573         *QueryingValueSimplified.getValue();
4574 
4575     if (AccumulatedSimplifiedValue.hasValue() &&
4576         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4577         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4578       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4579     if (AccumulatedSimplifiedValue.hasValue() &&
4580         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4581       return true;
4582 
4583     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4584                       << " is assumed to be "
4585                       << QueryingValueSimplifiedUnwrapped << "\n");
4586 
4587     AccumulatedSimplifiedValue = QueryingValueSimplified;
4588     return true;
4589   }
4590 
  /// Return whether a simplification candidate was found.
4592   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4593     if (!getAssociatedValue().getType()->isIntegerTy())
4594       return false;
4595 
4596     // This will also pass the call base context.
4597     const auto &AA =
4598         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4599 
4600     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4601 
4602     if (!COpt.hasValue()) {
4603       SimplifiedAssociatedValue = llvm::None;
4604       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4605       return true;
4606     }
4607     if (auto *C = COpt.getValue()) {
4608       SimplifiedAssociatedValue = C;
4609       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4610       return true;
4611     }
4612     return false;
4613   }
4614 
4615   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4616     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4617       return true;
4618     if (askSimplifiedValueFor<AAPotentialValues>(A))
4619       return true;
4620     return false;
4621   }
4622 
4623   /// See AbstractAttribute::manifest(...).
4624   ChangeStatus manifest(Attributor &A) override {
4625     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4626 
4627     if (SimplifiedAssociatedValue.hasValue() &&
4628         !SimplifiedAssociatedValue.getValue())
4629       return Changed;
4630 
4631     Value &V = getAssociatedValue();
4632     auto *C = SimplifiedAssociatedValue.hasValue()
4633                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4634                   : UndefValue::get(V.getType());
4635     if (C && C != &V) {
4636       Value *NewV = AA::getWithType(*C, *V.getType());
4637       // We can replace the AssociatedValue with the constant.
4638       if (!V.user_empty() && &V != C && NewV) {
4639         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4640                           << " :: " << *this << "\n");
4641         if (A.changeValueAfterManifest(V, *NewV))
4642           Changed = ChangeStatus::CHANGED;
4643       }
4644     }
4645 
4646     return Changed | AAValueSimplify::manifest(A);
4647   }
4648 
4649   /// See AbstractState::indicatePessimisticFixpoint(...).
4650   ChangeStatus indicatePessimisticFixpoint() override {
    // NOTE: Associated value will be returned in a pessimistic fixpoint and is
    // regarded as known. That's why `indicateOptimisticFixpoint` is called.
4653     SimplifiedAssociatedValue = &getAssociatedValue();
4654     indicateOptimisticFixpoint();
4655     return ChangeStatus::CHANGED;
4656   }
4657 
4658 protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under the current assumption. In the
  // pessimistic state, getAssumedSimplifiedValue does not return this value
  // but the original associated value.
4663   Optional<Value *> SimplifiedAssociatedValue;
4664 };
4665 
4666 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4667   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4668       : AAValueSimplifyImpl(IRP, A) {}
4669 
4670   void initialize(Attributor &A) override {
4671     AAValueSimplifyImpl::initialize(A);
4672     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4673       indicatePessimisticFixpoint();
4674     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4675                  Attribute::StructRet, Attribute::Nest},
4676                 /* IgnoreSubsumingPositions */ true))
4677       indicatePessimisticFixpoint();
4678 
    // FIXME: This is a hack to prevent us from propagating function pointers in
4680     // the new pass manager CGSCC pass as it creates call edges the
4681     // CallGraphUpdater cannot handle yet.
4682     Value &V = getAssociatedValue();
4683     if (V.getType()->isPointerTy() &&
4684         V.getType()->getPointerElementType()->isFunctionTy() &&
4685         !A.isModulePass())
4686       indicatePessimisticFixpoint();
4687   }
4688 
4689   /// See AbstractAttribute::updateImpl(...).
4690   ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly; otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
4693     Argument *Arg = getAssociatedArgument();
4694     if (Arg->hasByValAttr()) {
4695       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4696       //       there is no race by not copying a constant byval.
4697       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4698                                                        DepClassTy::REQUIRED);
4699       if (!MemAA.isAssumedReadOnly())
4700         return indicatePessimisticFixpoint();
4701     }
4702 
4703     auto Before = SimplifiedAssociatedValue;
4704 
4705     auto PredForCallSite = [&](AbstractCallSite ACS) {
4706       const IRPosition &ACSArgPos =
4707           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
      // Check if a corresponding argument was found or if it is not
      // associated (which can happen for callback calls).
4710       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4711         return false;
4712 
4713       // We can only propagate thread independent values through callbacks.
      // This is different from direct/indirect call sites because for them we
4715       // know the thread executing the caller and callee is the same. For
4716       // callbacks this is not guaranteed, thus a thread dependent value could
4717       // be different for the caller and callee, making it invalid to propagate.
4718       Value &ArgOp = ACSArgPos.getAssociatedValue();
4719       if (ACS.isCallbackCall())
4720         if (auto *C = dyn_cast<Constant>(&ArgOp))
4721           if (C->isThreadDependent())
4722             return false;
4723       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4724     };
4725 
    // Generate an answer specific to the call site context.
4727     bool Success;
4728     bool AllCallSitesKnown;
4729     if (hasCallBaseContext())
4730       Success = PredForCallSite(
4731           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
4732     else
4733       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
4734                                        AllCallSitesKnown);
4735 
4736     if (!Success)
4737       if (!askSimplifiedValueForOtherAAs(A))
4738         return indicatePessimisticFixpoint();
4739 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
4743   }
4744 
4745   /// See AbstractAttribute::trackStatistics()
4746   void trackStatistics() const override {
4747     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4748   }
4749 };
4750 
4751 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4752   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4753       : AAValueSimplifyImpl(IRP, A) {}
4754 
4755   /// See AbstractAttribute::updateImpl(...).
4756   ChangeStatus updateImpl(Attributor &A) override {
4757     auto Before = SimplifiedAssociatedValue;
4758 
4759     auto PredForReturned = [&](Value &V) {
4760       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4761     };
4762 
4763     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4764       if (!askSimplifiedValueForOtherAAs(A))
4765         return indicatePessimisticFixpoint();
4766 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
4770   }
4771 
4772   ChangeStatus manifest(Attributor &A) override {
4773     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4774 
4775     if (SimplifiedAssociatedValue.hasValue() &&
4776         !SimplifiedAssociatedValue.getValue())
4777       return Changed;
4778 
4779     Value &V = getAssociatedValue();
4780     auto *C = SimplifiedAssociatedValue.hasValue()
4781                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4782                   : UndefValue::get(V.getType());
4783     if (C && C != &V) {
4784       auto PredForReturned =
4785           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4786             // We can replace the AssociatedValue with the constant.
4787             if (&V == C || isa<UndefValue>(V))
4788               return true;
4789 
4790             for (ReturnInst *RI : RetInsts) {
4791               if (RI->getFunction() != getAnchorScope())
4792                 continue;
4793               Value *NewV =
4794                   AA::getWithType(*C, *RI->getReturnValue()->getType());
4795               if (!NewV)
4796                 continue;
4797               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4798                                 << " in " << *RI << " :: " << *this << "\n");
4799               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
4800                 Changed = ChangeStatus::CHANGED;
4801             }
4802             return true;
4803           };
4804       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4805     }
4806 
4807     return Changed | AAValueSimplify::manifest(A);
4808   }
4809 
4810   /// See AbstractAttribute::trackStatistics()
4811   void trackStatistics() const override {
4812     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4813   }
4814 };
4815 
4816 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4817   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4818       : AAValueSimplifyImpl(IRP, A) {}
4819 
4820   /// See AbstractAttribute::initialize(...).
4821   void initialize(Attributor &A) override {
4822     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4823     //        Needs investigation.
4824     // AAValueSimplifyImpl::initialize(A);
4825     Value &V = getAnchorValue();
4826 
    // TODO: add other stuff
4828     if (isa<Constant>(V))
4829       indicatePessimisticFixpoint();
4830   }
4831 
4832   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4833   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
  /// Return true if successful; in that case SimplifiedAssociatedValue will be
  /// updated and \p Changed is set appropriately.
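  /// For example (illustrative only):
  ///   %c = icmp eq i8* %p, null
  /// simplifies to `false` if %p is assumed non-null, and the `ne` form of
  /// the comparison simplifies to `true`.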
4836   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4837                               ChangeStatus &Changed) {
4838     if (!ICmp)
4839       return false;
4840     if (!ICmp->isEquality())
4841       return false;
4842 
    // This is a comparison with == or !=. We check for nullptr now.
4844     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4845     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4846     if (!Op0IsNull && !Op1IsNull)
4847       return false;
4848 
4849     LLVMContext &Ctx = ICmp->getContext();
4850     // Check for `nullptr ==/!= nullptr` first:
4851     if (Op0IsNull && Op1IsNull) {
4852       Value *NewVal = ConstantInt::get(
4853           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4854       assert(!SimplifiedAssociatedValue.hasValue() &&
4855              "Did not expect non-fixed value for constant comparison");
4856       SimplifiedAssociatedValue = NewVal;
4857       indicateOptimisticFixpoint();
4858       Changed = ChangeStatus::CHANGED;
4859       return true;
4860     }
4861 
    // What is left is the nullptr ==/!= non-nullptr case. We'll use AANonNull
    // on the non-nullptr operand; if we assume it is non-null we can conclude
    // the result of the comparison.
4865     assert((Op0IsNull || Op1IsNull) &&
4866            "Expected nullptr versus non-nullptr comparison at this point");
4867 
    // PtrIdx is the index of the operand we assume to be non-null: if Op0 is
    // null it is 1, otherwise 0.
    unsigned PtrIdx = Op0IsNull;
4870     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4871         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4872         DepClassTy::REQUIRED);
4873     if (!PtrNonNullAA.isAssumedNonNull())
4874       return false;
4875 
4876     // The new value depends on the predicate, true for != and false for ==.
4877     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4878                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4879 
4880     assert((!SimplifiedAssociatedValue.hasValue() ||
4881             SimplifiedAssociatedValue == NewVal) &&
4882            "Did not expect to change value for zero-comparison");
4883 
4884     auto Before = SimplifiedAssociatedValue;
4885     SimplifiedAssociatedValue = NewVal;
4886 
4887     if (PtrNonNullAA.isKnownNonNull())
4888       indicateOptimisticFixpoint();
4889 
    Changed = Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                  : ChangeStatus::CHANGED;
4892     return true;
4893   }
4894 
4895   /// See AbstractAttribute::updateImpl(...).
4896   ChangeStatus updateImpl(Attributor &A) override {
4897     auto Before = SimplifiedAssociatedValue;
4898 
4899     ChangeStatus Changed;
4900     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4901                                Changed))
4902       return Changed;
4903 
4904     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4905                             bool Stripped) -> bool {
4906       auto &AA = A.getAAFor<AAValueSimplify>(
4907           *this, IRPosition::value(V, getCallBaseContext()),
4908           DepClassTy::REQUIRED);
4909       if (!Stripped && this == &AA) {
        // TODO: Look at the instruction and check recursively.
4912         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4913                           << "\n");
4914         return false;
4915       }
4916       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4917     };
4918 
4919     bool Dummy = false;
4920     if (!genericValueTraversal<AAValueSimplify, bool>(
4921             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4922             /* UseValueSimplify */ false))
4923       if (!askSimplifiedValueForOtherAAs(A))
4924         return indicatePessimisticFixpoint();
4925 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
4929   }
4930 
4931   /// See AbstractAttribute::trackStatistics()
4932   void trackStatistics() const override {
4933     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4934   }
4935 };
4936 
4937 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4938   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4939       : AAValueSimplifyImpl(IRP, A) {}
4940 
4941   /// See AbstractAttribute::initialize(...).
4942   void initialize(Attributor &A) override {
4943     SimplifiedAssociatedValue = &getAnchorValue();
4944     indicateOptimisticFixpoint();
4945   }
  /// See AbstractAttribute::updateImpl(...).
4947   ChangeStatus updateImpl(Attributor &A) override {
4948     llvm_unreachable(
4949         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4950   }
4951   /// See AbstractAttribute::trackStatistics()
4952   void trackStatistics() const override {
4953     STATS_DECLTRACK_FN_ATTR(value_simplify)
4954   }
4955 };
4956 
4957 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4958   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4959       : AAValueSimplifyFunction(IRP, A) {}
4960   /// See AbstractAttribute::trackStatistics()
4961   void trackStatistics() const override {
4962     STATS_DECLTRACK_CS_ATTR(value_simplify)
4963   }
4964 };
4965 
4966 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4967   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4968       : AAValueSimplifyReturned(IRP, A) {}
4969 
4970   /// See AbstractAttribute::manifest(...).
4971   ChangeStatus manifest(Attributor &A) override {
4972     return AAValueSimplifyImpl::manifest(A);
4973   }
4974 
4975   void trackStatistics() const override {
4976     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4977   }
4978 };
4979 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4980   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4981       : AAValueSimplifyFloating(IRP, A) {}
4982 
4983   /// See AbstractAttribute::manifest(...).
4984   ChangeStatus manifest(Attributor &A) override {
4985     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4986 
4987     if (SimplifiedAssociatedValue.hasValue() &&
4988         !SimplifiedAssociatedValue.getValue())
4989       return Changed;
4990 
4991     Value &V = getAssociatedValue();
4992     auto *C = SimplifiedAssociatedValue.hasValue()
4993                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4994                   : UndefValue::get(V.getType());
4995     if (C) {
4996       Use &U = cast<CallBase>(&getAnchorValue())
4997                    ->getArgOperandUse(getCallSiteArgNo());
4998       // We can replace the AssociatedValue with the constant.
4999       if (&V != C) {
5000         if (Value *NewV = AA::getWithType(*C, *V.getType()))
5001           if (A.changeUseAfterManifest(U, *NewV))
5002             Changed = ChangeStatus::CHANGED;
5003       }
5004     }
5005 
5006     return Changed | AAValueSimplify::manifest(A);
5007   }
5008 
5009   void trackStatistics() const override {
5010     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5011   }
5012 };
5013 
5014 /// ----------------------- Heap-To-Stack Conversion ---------------------------
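/// This conversion replaces heap allocations (malloc/calloc/aligned_alloc)
/// that provably do not escape and have a bounded size with stack
/// allocations. An illustrative sketch in IR:
///
///   %m = call i8* @malloc(i64 16)   ; is rewritten to:  %m = alloca i8, i64 16
///
/// and the matching free calls are deleted.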
5015 struct AAHeapToStackImpl : public AAHeapToStack {
5016   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
5017       : AAHeapToStack(IRP, A) {}
5018 
5019   const std::string getAsStr() const override {
5020     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
5021   }
5022 
5023   bool isAssumedHeapToStack(CallBase &CB) const override {
5024     return isValidState() && MallocCalls.contains(&CB) &&
5025            !BadMallocCalls.count(&CB);
5026   }
5027 
5028   bool isKnownHeapToStack(CallBase &CB) const override {
5029     return isValidState() && MallocCalls.contains(&CB) &&
5030            !BadMallocCalls.count(&CB);
5031   }
5032 
5033   ChangeStatus manifest(Attributor &A) override {
5034     assert(getState().isValidState() &&
5035            "Attempted to manifest an invalid state!");
5036 
5037     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5038     Function *F = getAnchorScope();
5039     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5040 
5041     for (Instruction *MallocCall : MallocCalls) {
5042       // This malloc cannot be replaced.
5043       if (BadMallocCalls.count(MallocCall))
5044         continue;
5045 
5046       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5047         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5048         A.deleteAfterManifest(*FreeCall);
5049         HasChanged = ChangeStatus::CHANGED;
5050       }
5051 
5052       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5053                         << "\n");
5054 
5055       Align Alignment;
5056       Value *Size;
5057       if (isCallocLikeFn(MallocCall, TLI)) {
5058         auto *Num = MallocCall->getOperand(0);
5059         auto *SizeT = MallocCall->getOperand(1);
5060         IRBuilder<> B(MallocCall);
5061         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5062       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5063         Size = MallocCall->getOperand(1);
5064         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5065                                    ->getValue()
5066                                    .getZExtValue())
5067                         .valueOrOne();
5068       } else {
5069         Size = MallocCall->getOperand(0);
5070       }
5071 
5072       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5073       Instruction *AI =
5074           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5075                          "", MallocCall->getNextNode());
5076 
5077       if (AI->getType() != MallocCall->getType())
5078         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5079                              AI->getNextNode());
5080 
5081       A.changeValueAfterManifest(*MallocCall, *AI);
5082 
5083       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5084         auto *NBB = II->getNormalDest();
5085         BranchInst::Create(NBB, MallocCall->getParent());
5086         A.deleteAfterManifest(*MallocCall);
5087       } else {
5088         A.deleteAfterManifest(*MallocCall);
5089       }
5090 
5091       // Zero out the allocated memory if it was a calloc.
5092       if (isCallocLikeFn(MallocCall, TLI)) {
5093         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5094                                    AI->getNextNode());
5095         Value *Ops[] = {
5096             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5097             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5098 
5099         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5100         Module *M = F->getParent();
5101         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5102         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5103       }
5104       HasChanged = ChangeStatus::CHANGED;
5105     }
5106 
5107     return HasChanged;
5108   }
5109 
5110   /// Collection of all malloc calls in a function.
5111   SmallSetVector<Instruction *, 4> MallocCalls;
5112 
5113   /// Collection of malloc calls that cannot be converted.
5114   DenseSet<const Instruction *> BadMallocCalls;
5115 
5116   /// A map for each malloc call to the set of associated free calls.
5117   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5118 
5119   ChangeStatus updateImpl(Attributor &A) override;
5120 };
5121 
5122 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5123   const Function *F = getAnchorScope();
5124   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5125 
5126   MustBeExecutedContextExplorer &Explorer =
5127       A.getInfoCache().getMustBeExecutedContextExplorer();
5128 
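  // A malloc call passes FreeCheck if it has exactly one associated free and
  // that free is guaranteed to execute whenever the malloc does, i.e., it
  // occurs in the must-be-executed context following the allocation.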
5129   auto FreeCheck = [&](Instruction &I) {
5130     const auto &Frees = FreesForMalloc.lookup(&I);
5131     if (Frees.size() != 1)
5132       return false;
5133     Instruction *UniqueFree = *Frees.begin();
5134     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5135   };
5136 
5137   auto UsesCheck = [&](Instruction &I) {
5138     bool ValidUsesOnly = true;
5139     bool MustUse = true;
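    // MustUse tracks whether the value reaching a use is necessarily the
    // allocation itself; it is cleared once uses are followed through PHIs or
    // selects, where a free could act on a different pointer.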
5140     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5141       Instruction *UserI = cast<Instruction>(U.getUser());
5142       if (isa<LoadInst>(UserI))
5143         return true;
5144       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5145         if (SI->getValueOperand() == U.get()) {
5146           LLVM_DEBUG(dbgs()
5147                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5148           ValidUsesOnly = false;
5149         } else {
5150           // A store into the malloc'ed memory is fine.
5151         }
5152         return true;
5153       }
5154       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5155         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5156           return true;
        // Record free calls for this malloc.
5158         if (isFreeCall(UserI, TLI)) {
5159           if (MustUse) {
5160             FreesForMalloc[&I].insert(UserI);
5161           } else {
5162             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5163                               << *UserI << "\n");
5164             ValidUsesOnly = false;
5165           }
5166           return true;
5167         }
5168 
5169         unsigned ArgNo = CB->getArgOperandNo(&U);
5170 
5171         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5172             *this, IRPosition::callsite_argument(*CB, ArgNo),
5173             DepClassTy::REQUIRED);
5174 
5175         // If a callsite argument use is nofree, we are fine.
5176         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5177             *this, IRPosition::callsite_argument(*CB, ArgNo),
5178             DepClassTy::REQUIRED);
5179 
5180         if (!NoCaptureAA.isAssumedNoCapture() ||
5181             !ArgNoFreeAA.isAssumedNoFree()) {
5182           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5183           ValidUsesOnly = false;
5184         }
5185         return true;
5186       }
5187 
5188       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5189           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5190         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5191         Follow = true;
5192         return true;
5193       }
      // Unknown user for which we cannot track uses further (in a way that
      // makes sense).
5196       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5197       ValidUsesOnly = false;
5198       return true;
5199     };
5200     A.checkForAllUses(Pred, *this, I);
5201     return ValidUsesOnly;
5202   };
5203 
5204   auto MallocCallocCheck = [&](Instruction &I) {
5205     if (BadMallocCalls.count(&I))
5206       return true;
5207 
5208     bool IsMalloc = isMallocLikeFn(&I, TLI);
5209     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5210     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5211     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5212       BadMallocCalls.insert(&I);
5213       return true;
5214     }
5215 
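    // In all cases below, a MaxHeapToStackSize of -1 disables the size limit;
    // otherwise only allocations with a known constant size no larger than
    // the limit qualify.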
5216     if (IsMalloc) {
5217       if (MaxHeapToStackSize == -1) {
5218         if (UsesCheck(I) || FreeCheck(I)) {
5219           MallocCalls.insert(&I);
5220           return true;
5221         }
5222       }
5223       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5224         if (Size->getValue().ule(MaxHeapToStackSize))
5225           if (UsesCheck(I) || FreeCheck(I)) {
5226             MallocCalls.insert(&I);
5227             return true;
5228           }
5229     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5230       if (MaxHeapToStackSize == -1) {
5231         if (UsesCheck(I) || FreeCheck(I)) {
5232           MallocCalls.insert(&I);
5233           return true;
5234         }
5235       }
5236       // Only if the alignment and sizes are constant.
5237       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5238         if (Size->getValue().ule(MaxHeapToStackSize))
5239           if (UsesCheck(I) || FreeCheck(I)) {
5240             MallocCalls.insert(&I);
5241             return true;
5242           }
5243     } else if (IsCalloc) {
5244       if (MaxHeapToStackSize == -1) {
5245         if (UsesCheck(I) || FreeCheck(I)) {
5246           MallocCalls.insert(&I);
5247           return true;
5248         }
5249       }
5250       bool Overflow = false;
5251       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5252         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5253           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5254                   .ule(MaxHeapToStackSize))
5255             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5256               MallocCalls.insert(&I);
5257               return true;
5258             }
5259     }
5260 
5261     BadMallocCalls.insert(&I);
5262     return true;
5263   };
5264 
5265   size_t NumBadMallocs = BadMallocCalls.size();
5266 
5267   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5268 
5269   if (NumBadMallocs != BadMallocCalls.size())
5270     return ChangeStatus::CHANGED;
5271 
5272   return ChangeStatus::UNCHANGED;
5273 }
5274 
5275 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5276   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5277       : AAHeapToStackImpl(IRP, A) {}
5278 
5279   /// See AbstractAttribute::trackStatistics().
5280   void trackStatistics() const override {
5281     STATS_DECL(
5282         MallocCalls, Function,
5283         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5284     for (auto *C : MallocCalls)
5285       if (!BadMallocCalls.count(C))
5286         ++BUILD_STAT_NAME(MallocCalls, Function);
5287   }
5288 };
5289 
5290 /// ----------------------- Privatizable Pointers ------------------------------
5291 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5292   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5293       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5294 
5295   ChangeStatus indicatePessimisticFixpoint() override {
5296     AAPrivatizablePtr::indicatePessimisticFixpoint();
5297     PrivatizableType = nullptr;
5298     return ChangeStatus::CHANGED;
5299   }
5300 
  /// Identify the type we can choose for a private copy of the underlying
  /// argument. None means it is not clear yet, nullptr means there is none.
5303   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5304 
5305   /// Return a privatizable type that encloses both T0 and T1.
5306   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5307   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5308     if (!T0.hasValue())
5309       return T1;
5310     if (!T1.hasValue())
5311       return T0;
5312     if (T0 == T1)
5313       return T0;
5314     return nullptr;
5315   }
5316 
5317   Optional<Type *> getPrivatizableType() const override {
5318     return PrivatizableType;
5319   }
5320 
5321   const std::string getAsStr() const override {
5322     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5323   }
5324 
5325 protected:
5326   Optional<Type *> PrivatizableType;
5327 };
5328 
5329 // TODO: Do this for call site arguments (probably also other values) as well.
5330 
5331 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5332   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5333       : AAPrivatizablePtrImpl(IRP, A) {}
5334 
5335   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5336   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5337     // If this is a byval argument and we know all the call sites (so we can
5338     // rewrite them), there is no need to check them explicitly.
5339     bool AllCallSitesKnown;
5340     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5341         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5342                                true, AllCallSitesKnown))
5343       return getAssociatedValue().getType()->getPointerElementType();
5344 
5345     Optional<Type *> Ty;
5346     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5347 
    // Make sure the associated call site argument has the same type at all
    // call sites and it is an allocation we know is safe to privatize; for
    // now, that means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
    //       the type from that information instead. That is a little more
    //       involved and will be done in a follow-up patch.
5354     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5355       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is one not
5357       // associated (which can happen for callback calls).
5358       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5359         return false;
5360 
5361       // Check that all call sites agree on a type.
5362       auto &PrivCSArgAA =
5363           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5364       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5365 
5366       LLVM_DEBUG({
5367         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5368         if (CSTy.hasValue() && CSTy.getValue())
5369           CSTy.getValue()->print(dbgs());
5370         else if (CSTy.hasValue())
5371           dbgs() << "<nullptr>";
5372         else
5373           dbgs() << "<none>";
5374       });
5375 
5376       Ty = combineTypes(Ty, CSTy);
5377 
5378       LLVM_DEBUG({
5379         dbgs() << " : New Type: ";
5380         if (Ty.hasValue() && Ty.getValue())
5381           Ty.getValue()->print(dbgs());
5382         else if (Ty.hasValue())
5383           dbgs() << "<nullptr>";
5384         else
5385           dbgs() << "<none>";
5386         dbgs() << "\n";
5387       });
5388 
5389       return !Ty.hasValue() || Ty.getValue();
5390     };
5391 
5392     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5393       return nullptr;
5394     return Ty;
5395   }
5396 
5397   /// See AbstractAttribute::updateImpl(...).
5398   ChangeStatus updateImpl(Attributor &A) override {
5399     PrivatizableType = identifyPrivatizableType(A);
5400     if (!PrivatizableType.hasValue())
5401       return ChangeStatus::UNCHANGED;
5402     if (!PrivatizableType.getValue())
5403       return indicatePessimisticFixpoint();
5404 
    // The dependence is optional so that a failure to deduce the alignment
    // does not make us give up on the privatization itself.
5407     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5408                         DepClassTy::OPTIONAL);
5409 
5410     // Avoid arguments with padding for now.
5411     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5412         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5413                                                 A.getInfoCache().getDL())) {
5414       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5415       return indicatePessimisticFixpoint();
5416     }
5417 
5418     // Verify callee and caller agree on how the promoted argument would be
5419     // passed.
5420     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5421     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5422     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5423     Function &Fn = *getIRPosition().getAnchorScope();
5424     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5425     ArgsToPromote.insert(getAssociatedArgument());
5426     const auto *TTI =
5427         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5428     if (!TTI ||
5429         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5430             Fn, *TTI, ArgsToPromote, Dummy) ||
5431         ArgsToPromote.empty()) {
5432       LLVM_DEBUG(
5433           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5434                  << Fn.getName() << "\n");
5435       return indicatePessimisticFixpoint();
5436     }
5437 
5438     // Collect the types that will replace the privatizable type in the function
5439     // signature.
5440     SmallVector<Type *, 16> ReplacementTypes;
5441     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5442 
5443     // Register a rewrite of the argument.
5444     Argument *Arg = getAssociatedArgument();
5445     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5446       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5447       return indicatePessimisticFixpoint();
5448     }
5449 
5450     unsigned ArgNo = Arg->getArgNo();
5451 
    // Helper to check if, for the given call site, the associated argument is
    // passed to a callback where the privatization would be different.
5454     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5455       SmallVector<const Use *, 4> CallbackUses;
5456       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5457       for (const Use *U : CallbackUses) {
5458         AbstractCallSite CBACS(U);
5459         assert(CBACS && CBACS.isCallbackCall());
5460         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5461           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5462 
5463           LLVM_DEBUG({
5464             dbgs()
5465                 << "[AAPrivatizablePtr] Argument " << *Arg
                << " check if it can be privatized in the context of its "
                   "parent ("
5467                 << Arg->getParent()->getName()
5468                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5469                    "callback ("
5470                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5471                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5472                 << CBACS.getCallArgOperand(CBArg) << " vs "
5473                 << CB.getArgOperand(ArgNo) << "\n"
5474                 << "[AAPrivatizablePtr] " << CBArg << " : "
5475                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5476           });
5477 
5478           if (CBArgNo != int(ArgNo))
5479             continue;
5480           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5481               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5482           if (CBArgPrivAA.isValidState()) {
5483             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5484             if (!CBArgPrivTy.hasValue())
5485               continue;
5486             if (CBArgPrivTy.getValue() == PrivatizableType)
5487               continue;
5488           }
5489 
5490           LLVM_DEBUG({
5491             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5492                    << " cannot be privatized in the context of its parent ("
5493                    << Arg->getParent()->getName()
5494                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5495                       "callback ("
5496                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5497                    << ").\n[AAPrivatizablePtr] for which the argument "
5498                       "privatization is not compatible.\n";
5499           });
5500           return false;
5501         }
5502       }
5503       return true;
5504     };
5505 
    // Helper to check if, for the given call site, the associated argument is
    // passed to a direct call where the privatization would be different.
5508     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5509       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5510       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5511       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5512              "Expected a direct call operand for callback call operand");
5513 
5514       LLVM_DEBUG({
5515         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if it can be privatized in the context of its "
                  "parent ("
5517                << Arg->getParent()->getName()
5518                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5519                   "direct call of ("
5520                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5521                << ").\n";
5522       });
5523 
5524       Function *DCCallee = DC->getCalledFunction();
5525       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5526         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5527             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5528             DepClassTy::REQUIRED);
5529         if (DCArgPrivAA.isValidState()) {
5530           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5531           if (!DCArgPrivTy.hasValue())
5532             return true;
5533           if (DCArgPrivTy.getValue() == PrivatizableType)
5534             return true;
5535         }
5536       }
5537 
5538       LLVM_DEBUG({
5539         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5540                << " cannot be privatized in the context of its parent ("
5541                << Arg->getParent()->getName()
5542                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5543                   "direct call of ("
               << DCCallee->getName()
5545                << ").\n[AAPrivatizablePtr] for which the argument "
5546                   "privatization is not compatible.\n";
5547       });
5548       return false;
5549     };
5550 
5551     // Helper to check if the associated argument is used at the given abstract
5552     // call site in a way that is incompatible with the privatization assumed
5553     // here.
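    // Note the apparent inversion below: a direct call site is checked
    // against callback uses of the argument, while a callback call site is
    // checked against the underlying direct call.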
5554     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5555       if (ACS.isDirectCall())
        return IsCompatiblePrivArgOfCallback(
            *cast<CallBase>(ACS.getInstruction()));
5557       if (ACS.isCallbackCall())
5558         return IsCompatiblePrivArgOfDirectCS(ACS);
5559       return false;
5560     };
5561 
5562     bool AllCallSitesKnown;
5563     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5564                                 AllCallSitesKnown))
5565       return indicatePessimisticFixpoint();
5566 
5567     return ChangeStatus::UNCHANGED;
5568   }
5569 
  /// Given a type to privatize \p PrivType, collect the constituent types
  /// (which are used) in \p ReplacementTypes.
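  /// E.g., a struct { i32, i64 } yields the two types i32 and i64, an array
  /// [4 x float] yields four float entries, and any other type is kept as
  /// the single replacement type.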
5572   static void
5573   identifyReplacementTypes(Type *PrivType,
5574                            SmallVectorImpl<Type *> &ReplacementTypes) {
5575     // TODO: For now we expand the privatization type to the fullest which can
5576     //       lead to dead arguments that need to be removed later.
5577     assert(PrivType && "Expected privatizable type!");
5578 
    // Traverse the type, extract constituent types on the outermost level.
5580     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5581       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5582         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5583     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5584       ReplacementTypes.append(PrivArrayType->getNumElements(),
5585                               PrivArrayType->getElementType());
5586     } else {
5587       ReplacementTypes.push_back(PrivType);
5588     }
5589   }
5590 
5591   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5592   /// The values needed are taken from the arguments of \p F starting at
5593   /// position \p ArgNo.
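  /// E.g., for a struct type { i32, i64 }, the two members of \p Base are
  /// initialized with the arguments at positions \p ArgNo and \p ArgNo + 1,
  /// matching the expansion performed by identifyReplacementTypes.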
5594   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5595                                    unsigned ArgNo, Instruction &IP) {
5596     assert(PrivType && "Expected privatizable type!");
5597 
5598     IRBuilder<NoFolder> IRB(&IP);
5599     const DataLayout &DL = F.getParent()->getDataLayout();
5600 
5601     // Traverse the type, build GEPs and stores.
5602     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5603       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5604       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5605         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5606         Value *Ptr =
5607             constructPointer(PointeeTy, PrivType, &Base,
5608                              PrivStructLayout->getElementOffset(u), IRB, DL);
5609         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5610       }
5611     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5612       Type *PointeeTy = PrivArrayType->getElementType();
5613       Type *PointeePtrTy = PointeeTy->getPointerTo();
5614       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5615       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5616         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
5617                                       u * PointeeTySize, IRB, DL);
5618         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5619       }
5620     } else {
5621       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5622     }
5623   }
5624 
5625   /// Extract values from \p Base according to the type \p PrivType at the
5626   /// call position \p ACS. The values are appended to \p ReplacementValues.
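  /// This is the call site counterpart of createInitialization: for a struct
  /// type { i32, i64 } two loads are emitted in front of \p ACS and appended
  /// to \p ReplacementValues.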
5627   void createReplacementValues(Align Alignment, Type *PrivType,
5628                                AbstractCallSite ACS, Value *Base,
5629                                SmallVectorImpl<Value *> &ReplacementValues) {
5630     assert(Base && "Expected base value!");
5631     assert(PrivType && "Expected privatizable type!");
5632     Instruction *IP = ACS.getInstruction();
5633 
5634     IRBuilder<NoFolder> IRB(IP);
5635     const DataLayout &DL = IP->getModule()->getDataLayout();
5636 
5637     if (Base->getType()->getPointerElementType() != PrivType)
5638       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5639                                                  "", ACS.getInstruction());
5640 
5641     // Traverse the type, build GEPs and loads.
5642     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5643       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5644       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5645         Type *PointeeTy = PrivStructType->getElementType(u);
5646         Value *Ptr =
5647             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
5648                              PrivStructLayout->getElementOffset(u), IRB, DL);
5649         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5650         L->setAlignment(Alignment);
5651         ReplacementValues.push_back(L);
5652       }
5653     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5654       Type *PointeeTy = PrivArrayType->getElementType();
5655       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5656       Type *PointeePtrTy = PointeeTy->getPointerTo();
5657       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5658         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
5659                                       u * PointeeTySize, IRB, DL);
5660         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5661         L->setAlignment(Alignment);
5662         ReplacementValues.push_back(L);
5663       }
5664     } else {
5665       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5666       L->setAlignment(Alignment);
5667       ReplacementValues.push_back(L);
5668     }
5669   }
5670 
5671   /// See AbstractAttribute::manifest(...)
5672   ChangeStatus manifest(Attributor &A) override {
5673     if (!PrivatizableType.hasValue())
5674       return ChangeStatus::UNCHANGED;
5675     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5676 
5677     // Collect all tail calls in the function as we cannot allow new allocas to
5678     // escape into tail recursion.
5679     // TODO: Be smarter about new allocas escaping into tail calls.
5680     SmallVector<CallInst *, 16> TailCalls;
5681     if (!A.checkForAllInstructions(
5682             [&](Instruction &I) {
5683               CallInst &CI = cast<CallInst>(I);
5684               if (CI.isTailCall())
5685                 TailCalls.push_back(&CI);
5686               return true;
5687             },
5688             *this, {Instruction::Call}))
5689       return ChangeStatus::UNCHANGED;
5690 
5691     Argument *Arg = getAssociatedArgument();
5692     // Query AAAlign attribute for alignment of associated argument to
5693     // determine the best alignment of loads.
5694     const auto &AlignAA =
5695         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5696 
5697     // Callback to repair the associated function. A new alloca is placed at the
5698     // beginning and initialized with the values passed through arguments. The
5699     // new alloca replaces the use of the old pointer argument.
5700     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5701         [=](const Attributor::ArgumentReplacementInfo &ARI,
5702             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5703           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5704           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5705           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5706                                            Arg->getName() + ".priv", IP);
5707           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5708                                ArgIt->getArgNo(), *IP);
5709 
5710           if (AI->getType() != Arg->getType())
5711             AI =
5712                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5713           Arg->replaceAllUsesWith(AI);
5714 
5715           for (CallInst *CI : TailCalls)
5716             CI->setTailCall(false);
5717         };
5718 
5719     // Callback to repair a call site of the associated function. The elements
5720     // of the privatizable type are loaded prior to the call and passed to the
5721     // new function version.
5722     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5723         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5724                       AbstractCallSite ACS,
5725                       SmallVectorImpl<Value *> &NewArgOperands) {
5726           // When no alignment is specified for the load instruction,
5727           // natural alignment is assumed.
5728           createReplacementValues(
5729               assumeAligned(AlignAA.getAssumedAlign()),
5730               PrivatizableType.getValue(), ACS,
5731               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5732               NewArgOperands);
5733         };
5734 
5735     // Collect the types that will replace the privatizable type in the function
5736     // signature.
5737     SmallVector<Type *, 16> ReplacementTypes;
5738     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5739 
5740     // Register a rewrite of the argument.
5741     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5742                                            std::move(FnRepairCB),
5743                                            std::move(ACSRepairCB)))
5744       return ChangeStatus::CHANGED;
5745     return ChangeStatus::UNCHANGED;
5746   }
5747 
5748   /// See AbstractAttribute::trackStatistics()
5749   void trackStatistics() const override {
5750     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5751   }
5752 };
5753 
5754 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5755   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5756       : AAPrivatizablePtrImpl(IRP, A) {}
5757 
5758   /// See AbstractAttribute::initialize(...).
5759   virtual void initialize(Attributor &A) override {
5760     // TODO: We can privatize more than arguments.
5761     indicatePessimisticFixpoint();
5762   }
5763 
5764   ChangeStatus updateImpl(Attributor &A) override {
5765     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5766                      "updateImpl will not be called");
5767   }
5768 
5769   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5770   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5771     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5772     if (!Obj) {
5773       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5774       return nullptr;
5775     }
5776 
5777     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5778       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5779         if (CI->isOne())
5780           return Obj->getType()->getPointerElementType();
5781     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5782       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5783           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5784       if (PrivArgAA.isAssumedPrivatizablePtr())
5785         return Obj->getType()->getPointerElementType();
5786     }
5787 
5788     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5789                          "alloca nor privatizable argument: "
5790                       << *Obj << "!\n");
5791     return nullptr;
5792   }
5793 
5794   /// See AbstractAttribute::trackStatistics()
5795   void trackStatistics() const override {
5796     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5797   }
5798 };
5799 
5800 struct AAPrivatizablePtrCallSiteArgument final
5801     : public AAPrivatizablePtrFloating {
5802   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5803       : AAPrivatizablePtrFloating(IRP, A) {}
5804 
5805   /// See AbstractAttribute::initialize(...).
5806   void initialize(Attributor &A) override {
5807     if (getIRPosition().hasAttr(Attribute::ByVal))
5808       indicateOptimisticFixpoint();
5809   }
5810 
5811   /// See AbstractAttribute::updateImpl(...).
5812   ChangeStatus updateImpl(Attributor &A) override {
5813     PrivatizableType = identifyPrivatizableType(A);
5814     if (!PrivatizableType.hasValue())
5815       return ChangeStatus::UNCHANGED;
5816     if (!PrivatizableType.getValue())
5817       return indicatePessimisticFixpoint();
5818 
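    // Privatizing a call site argument is only sound if the callee cannot
    // observe the private copy: the pointer must not be captured, must not
    // alias other accessible memory, and must only be read, as checked below.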
5819     const IRPosition &IRP = getIRPosition();
5820     auto &NoCaptureAA =
5821         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5822     if (!NoCaptureAA.isAssumedNoCapture()) {
5823       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5824       return indicatePessimisticFixpoint();
5825     }
5826 
5827     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5828     if (!NoAliasAA.isAssumedNoAlias()) {
5829       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5830       return indicatePessimisticFixpoint();
5831     }
5832 
5833     const auto &MemBehaviorAA =
5834         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5835     if (!MemBehaviorAA.isAssumedReadOnly()) {
5836       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5837       return indicatePessimisticFixpoint();
5838     }
5839 
5840     return ChangeStatus::UNCHANGED;
5841   }
5842 
5843   /// See AbstractAttribute::trackStatistics()
5844   void trackStatistics() const override {
5845     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5846   }
5847 };
5848 
5849 struct AAPrivatizablePtrCallSiteReturned final
5850     : public AAPrivatizablePtrFloating {
5851   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5852       : AAPrivatizablePtrFloating(IRP, A) {}
5853 
5854   /// See AbstractAttribute::initialize(...).
5855   void initialize(Attributor &A) override {
5856     // TODO: We can privatize more than arguments.
5857     indicatePessimisticFixpoint();
5858   }
5859 
5860   /// See AbstractAttribute::trackStatistics()
5861   void trackStatistics() const override {
5862     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5863   }
5864 };
5865 
5866 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5867   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5868       : AAPrivatizablePtrFloating(IRP, A) {}
5869 
5870   /// See AbstractAttribute::initialize(...).
5871   void initialize(Attributor &A) override {
5872     // TODO: We can privatize more than arguments.
5873     indicatePessimisticFixpoint();
5874   }
5875 
5876   /// See AbstractAttribute::trackStatistics()
5877   void trackStatistics() const override {
5878     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5879   }
5880 };
5881 
5882 /// -------------------- Memory Behavior Attributes ----------------------------
5883 /// Includes read-none, read-only, and write-only.
5884 /// ----------------------------------------------------------------------------
5885 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5886   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5887       : AAMemoryBehavior(IRP, A) {}
5888 
5889   /// See AbstractAttribute::initialize(...).
5890   void initialize(Attributor &A) override {
5891     intersectAssumedBits(BEST_STATE);
5892     getKnownStateFromValue(getIRPosition(), getState());
5893     AAMemoryBehavior::initialize(A);
5894   }
5895 
5896   /// Return the memory behavior information encoded in the IR for \p IRP.
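  /// The state encoding is bitwise: `readnone` corresponds to NO_ACCESSES,
  /// i.e., NO_READS | NO_WRITES, `readonly` to NO_WRITES, and `writeonly`
  /// to NO_READS, as mapped in the switch below.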
5897   static void getKnownStateFromValue(const IRPosition &IRP,
5898                                      BitIntegerState &State,
5899                                      bool IgnoreSubsumingPositions = false) {
5900     SmallVector<Attribute, 2> Attrs;
5901     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5902     for (const Attribute &Attr : Attrs) {
5903       switch (Attr.getKindAsEnum()) {
5904       case Attribute::ReadNone:
5905         State.addKnownBits(NO_ACCESSES);
5906         break;
5907       case Attribute::ReadOnly:
5908         State.addKnownBits(NO_WRITES);
5909         break;
5910       case Attribute::WriteOnly:
5911         State.addKnownBits(NO_READS);
5912         break;
5913       default:
5914         llvm_unreachable("Unexpected attribute!");
5915       }
5916     }
5917 
5918     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5919       if (!I->mayReadFromMemory())
5920         State.addKnownBits(NO_READS);
5921       if (!I->mayWriteToMemory())
5922         State.addKnownBits(NO_WRITES);
5923     }
5924   }
5925 
5926   /// See AbstractAttribute::getDeducedAttributes(...).
5927   void getDeducedAttributes(LLVMContext &Ctx,
5928                             SmallVectorImpl<Attribute> &Attrs) const override {
5929     assert(Attrs.size() == 0);
5930     if (isAssumedReadNone())
5931       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5932     else if (isAssumedReadOnly())
5933       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5934     else if (isAssumedWriteOnly())
5935       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5936     assert(Attrs.size() <= 1);
5937   }
5938 
5939   /// See AbstractAttribute::manifest(...).
5940   ChangeStatus manifest(Attributor &A) override {
5941     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5942       return ChangeStatus::UNCHANGED;
5943 
5944     const IRPosition &IRP = getIRPosition();
5945 
5946     // Check if we would improve the existing attributes first.
5947     SmallVector<Attribute, 4> DeducedAttrs;
5948     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5949     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5950           return IRP.hasAttr(Attr.getKindAsEnum(),
5951                              /* IgnoreSubsumingPositions */ true);
5952         }))
5953       return ChangeStatus::UNCHANGED;
5954 
5955     // Clear existing attributes.
5956     IRP.removeAttrs(AttrKinds);
5957 
5958     // Use the generic manifest method.
5959     return IRAttribute::manifest(A);
5960   }
5961 
5962   /// See AbstractState::getAsStr().
5963   const std::string getAsStr() const override {
5964     if (isAssumedReadNone())
5965       return "readnone";
5966     if (isAssumedReadOnly())
5967       return "readonly";
5968     if (isAssumedWriteOnly())
5969       return "writeonly";
5970     return "may-read/write";
5971   }
5972 
5973   /// The set of IR attributes AAMemoryBehavior deals with.
5974   static const Attribute::AttrKind AttrKinds[3];
5975 };
5976 
5977 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5978     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5979 
5980 /// Memory behavior attribute for a floating value.
5981 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5982   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5983       : AAMemoryBehaviorImpl(IRP, A) {}
5984 
5985   /// See AbstractAttribute::initialize(...).
5986   void initialize(Attributor &A) override {
5987     AAMemoryBehaviorImpl::initialize(A);
5988     addUsesOf(A, getAssociatedValue());
5989   }
5990 
5991   /// See AbstractAttribute::updateImpl(...).
5992   ChangeStatus updateImpl(Attributor &A) override;
5993 
5994   /// See AbstractAttribute::trackStatistics()
5995   void trackStatistics() const override {
5996     if (isAssumedReadNone())
5997       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5998     else if (isAssumedReadOnly())
5999       STATS_DECLTRACK_FLOATING_ATTR(readonly)
6000     else if (isAssumedWriteOnly())
6001       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6002   }
6003 
6004 private:
6005   /// Return true if users of \p UserI might access the underlying
6006   /// variable/location described by \p U and should therefore be analyzed.
6007   bool followUsersOfUseIn(Attributor &A, const Use *U,
6008                           const Instruction *UserI);
6009 
6010   /// Update the state according to the effect of use \p U in \p UserI.
6011   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
6012 
6013 protected:
6014   /// Add the uses of \p V to the `Uses` set we look at during the update step.
6015   void addUsesOf(Attributor &A, const Value &V);
6016 
  /// Container for (transitive) uses of the associated value.
6018   SmallVector<const Use *, 8> Uses;
6019 
6020   /// Set to remember the uses we already traversed.
6021   SmallPtrSet<const Use *, 8> Visited;
6022 };
6023 
6024 /// Memory behavior attribute for function argument.
6025 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6026   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6027       : AAMemoryBehaviorFloating(IRP, A) {}
6028 
6029   /// See AbstractAttribute::initialize(...).
6030   void initialize(Attributor &A) override {
6031     intersectAssumedBits(BEST_STATE);
6032     const IRPosition &IRP = getIRPosition();
6033     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6034     // can query it when we use has/getAttr. That would allow us to reuse the
6035     // initialize of the base class here.
6036     bool HasByVal =
6037         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6038     getKnownStateFromValue(IRP, getState(),
6039                            /* IgnoreSubsumingPositions */ HasByVal);
6040 
6041     // Initialize the use vector with all direct uses of the associated value.
6042     Argument *Arg = getAssociatedArgument();
6043     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
6044       indicatePessimisticFixpoint();
6045     } else {
6046       addUsesOf(A, *Arg);
6047     }
6048   }
6049 
6050   ChangeStatus manifest(Attributor &A) override {
6051     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6052     if (!getAssociatedValue().getType()->isPointerTy())
6053       return ChangeStatus::UNCHANGED;
6054 
6055     // TODO: From readattrs.ll: "inalloca parameters are always
6056     //                           considered written"
6057     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6058       removeKnownBits(NO_WRITES);
6059       removeAssumedBits(NO_WRITES);
6060     }
6061     return AAMemoryBehaviorFloating::manifest(A);
6062   }
6063 
6064   /// See AbstractAttribute::trackStatistics()
6065   void trackStatistics() const override {
6066     if (isAssumedReadNone())
6067       STATS_DECLTRACK_ARG_ATTR(readnone)
6068     else if (isAssumedReadOnly())
6069       STATS_DECLTRACK_ARG_ATTR(readonly)
6070     else if (isAssumedWriteOnly())
6071       STATS_DECLTRACK_ARG_ATTR(writeonly)
6072   }
6073 };
6074 
6075 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6076   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6077       : AAMemoryBehaviorArgument(IRP, A) {}
6078 
6079   /// See AbstractAttribute::initialize(...).
6080   void initialize(Attributor &A) override {
    // If we don't have an associated argument this is either a variadic call
    // or an indirect call; either way, we cannot say anything and give up.
6083     Argument *Arg = getAssociatedArgument();
6084     if (!Arg) {
6085       indicatePessimisticFixpoint();
6086       return;
6087     }
6088     if (Arg->hasByValAttr()) {
6089       addKnownBits(NO_WRITES);
6090       removeKnownBits(NO_READS);
6091       removeAssumedBits(NO_READS);
6092     }
6093     AAMemoryBehaviorArgument::initialize(A);
6094     if (getAssociatedFunction()->isDeclaration())
6095       indicatePessimisticFixpoint();
6096   }
6097 
6098   /// See AbstractAttribute::updateImpl(...).
6099   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6104     Argument *Arg = getAssociatedArgument();
6105     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6106     auto &ArgAA =
6107         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6108     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6109   }
6110 
6111   /// See AbstractAttribute::trackStatistics()
6112   void trackStatistics() const override {
6113     if (isAssumedReadNone())
6114       STATS_DECLTRACK_CSARG_ATTR(readnone)
6115     else if (isAssumedReadOnly())
6116       STATS_DECLTRACK_CSARG_ATTR(readonly)
6117     else if (isAssumedWriteOnly())
6118       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6119   }
6120 };
6121 
6122 /// Memory behavior attribute for a call site return position.
6123 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6124   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6125       : AAMemoryBehaviorFloating(IRP, A) {}
6126 
6127   /// See AbstractAttribute::initialize(...).
6128   void initialize(Attributor &A) override {
6129     AAMemoryBehaviorImpl::initialize(A);
6130     Function *F = getAssociatedFunction();
6131     if (!F || F->isDeclaration())
6132       indicatePessimisticFixpoint();
6133   }
6134 
6135   /// See AbstractAttribute::manifest(...).
6136   ChangeStatus manifest(Attributor &A) override {
6137     // We do not annotate returned values.
6138     return ChangeStatus::UNCHANGED;
6139   }
6140 
6141   /// See AbstractAttribute::trackStatistics()
6142   void trackStatistics() const override {}
6143 };
6144 
6145 /// An AA to represent the memory behavior function attributes.
6146 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6147   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6148       : AAMemoryBehaviorImpl(IRP, A) {}
6149 
6150   /// See AbstractAttribute::updateImpl(Attributor &A).
6151   virtual ChangeStatus updateImpl(Attributor &A) override;
6152 
6153   /// See AbstractAttribute::manifest(...).
6154   ChangeStatus manifest(Attributor &A) override {
6155     Function &F = cast<Function>(getAnchorValue());
6156     if (isAssumedReadNone()) {
6157       F.removeFnAttr(Attribute::ArgMemOnly);
6158       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6159       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6160     }
6161     return AAMemoryBehaviorImpl::manifest(A);
6162   }
6163 
6164   /// See AbstractAttribute::trackStatistics()
6165   void trackStatistics() const override {
6166     if (isAssumedReadNone())
6167       STATS_DECLTRACK_FN_ATTR(readnone)
6168     else if (isAssumedReadOnly())
6169       STATS_DECLTRACK_FN_ATTR(readonly)
6170     else if (isAssumedWriteOnly())
6171       STATS_DECLTRACK_FN_ATTR(writeonly)
6172   }
6173 };
6174 
6175 /// AAMemoryBehavior attribute for call sites.
6176 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6177   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6178       : AAMemoryBehaviorImpl(IRP, A) {}
6179 
6180   /// See AbstractAttribute::initialize(...).
6181   void initialize(Attributor &A) override {
6182     AAMemoryBehaviorImpl::initialize(A);
6183     Function *F = getAssociatedFunction();
6184     if (!F || F->isDeclaration())
6185       indicatePessimisticFixpoint();
6186   }
6187 
6188   /// See AbstractAttribute::updateImpl(...).
6189   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6194     Function *F = getAssociatedFunction();
6195     const IRPosition &FnPos = IRPosition::function(*F);
6196     auto &FnAA =
6197         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6198     return clampStateAndIndicateChange(getState(), FnAA.getState());
6199   }
6200 
6201   /// See AbstractAttribute::trackStatistics()
6202   void trackStatistics() const override {
6203     if (isAssumedReadNone())
6204       STATS_DECLTRACK_CS_ATTR(readnone)
6205     else if (isAssumedReadOnly())
6206       STATS_DECLTRACK_CS_ATTR(readonly)
6207     else if (isAssumedWriteOnly())
6208       STATS_DECLTRACK_CS_ATTR(writeonly)
6209   }
6210 };
6211 
6212 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6213 
6214   // The current assumed state used to determine a change.
6215   auto AssumedState = getAssumed();
6216 
6217   auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has its own memory behavior state, use it to restrict
6219     // the local state. No further analysis is required as the other memory
6220     // state is as optimistic as it gets.
6221     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6222       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6223           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6224       intersectAssumedBits(MemBehaviorAA.getAssumed());
6225       return !isAtFixpoint();
6226     }
6227 
6228     // Remove access kind modifiers if necessary.
6229     if (I.mayReadFromMemory())
6230       removeAssumedBits(NO_READS);
6231     if (I.mayWriteToMemory())
6232       removeAssumedBits(NO_WRITES);
6233     return !isAtFixpoint();
6234   };
6235 
6236   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6237     return indicatePessimisticFixpoint();
6238 
6239   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6240                                         : ChangeStatus::UNCHANGED;
6241 }
6242 
6243 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6244 
6245   const IRPosition &IRP = getIRPosition();
6246   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6247   AAMemoryBehavior::StateType &S = getState();
6248 
  // First, check the function scope. We take the known information and we
  // avoid work if the assumed information implies the current assumed
  // information for this attribute. This is valid for all but byval arguments.
6252   Argument *Arg = IRP.getAssociatedArgument();
6253   AAMemoryBehavior::base_t FnMemAssumedState =
6254       AAMemoryBehavior::StateType::getWorstState();
6255   if (!Arg || !Arg->hasByValAttr()) {
6256     const auto &FnMemAA =
6257         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6258     FnMemAssumedState = FnMemAA.getAssumed();
6259     S.addKnownBits(FnMemAA.getKnown());
6260     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6261       return ChangeStatus::UNCHANGED;
6262   }
6263 
  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
6268   const auto &ArgNoCaptureAA =
6269       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6270   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6271     S.intersectAssumedBits(FnMemAssumedState);
6272     return ChangeStatus::CHANGED;
6273   }
6274 
6275   // The current assumed state used to determine a change.
6276   auto AssumedState = S.getAssumed();
6277 
6278   // Liveness information to exclude dead users.
6279   // TODO: Take the FnPos once we have call site specific liveness information.
6280   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6281       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6282       DepClassTy::NONE);
6283 
6284   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6285   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6286     const Use *U = Uses[i];
6287     Instruction *UserI = cast<Instruction>(U->getUser());
6288     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6289                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6290                       << "]\n");
6291     if (A.isAssumedDead(*U, this, &LivenessAA))
6292       continue;
6293 
    // Droppable users, e.g., llvm::assume, do not actually perform any action.
6295     if (UserI->isDroppable())
6296       continue;
6297 
6298     // Check if the users of UserI should also be visited.
6299     if (followUsersOfUseIn(A, U, UserI))
6300       addUsesOf(A, *UserI);
6301 
6302     // If UserI might touch memory we analyze the use in detail.
6303     if (UserI->mayReadOrWriteMemory())
6304       analyzeUseIn(A, U, UserI);
6305   }
6306 
6307   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6308                                         : ChangeStatus::UNCHANGED;
6309 }
6310 
6311 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6312   SmallVector<const Use *, 8> WL;
6313   for (const Use &U : V.uses())
6314     WL.push_back(&U);
6315 
6316   while (!WL.empty()) {
6317     const Use *U = WL.pop_back_val();
6318     if (!Visited.insert(U).second)
6319       continue;
6320 
6321     const Instruction *UserI = cast<Instruction>(U->getUser());
6322     if (UserI->mayReadOrWriteMemory()) {
6323       Uses.push_back(U);
6324       continue;
6325     }
6326     if (!followUsersOfUseIn(A, U, UserI))
6327       continue;
6328     for (const Use &UU : UserI->uses())
6329       WL.push_back(&UU);
6330   }
6331 }
6332 
6333 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6334                                                   const Instruction *UserI) {
6335   // The loaded value is unrelated to the pointer argument, no need to
6336   // follow the users of the load.
6337   if (isa<LoadInst>(UserI))
6338     return false;
6339 
6340   // By default we follow all uses assuming UserI might leak information on U,
6341   // we have special handling for call sites operands though.
6342   const auto *CB = dyn_cast<CallBase>(UserI);
6343   if (!CB || !CB->isArgOperand(U))
6344     return true;
6345 
6346   // If the use is a call argument known not to be captured, the users of
6347   // the call do not need to be visited because they have to be unrelated to
6348   // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
6352   if (U->get()->getType()->isPointerTy()) {
6353     unsigned ArgNo = CB->getArgOperandNo(U);
6354     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6355         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6356     return !ArgNoCaptureAA.isAssumedNoCapture();
6357   }
6358 
6359   return true;
6360 }
6361 
6362 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6363                                             const Instruction *UserI) {
6364   assert(UserI->mayReadOrWriteMemory());
6365 
6366   switch (UserI->getOpcode()) {
6367   default:
6368     // TODO: Handle all atomics and other side-effect operations we know of.
6369     break;
6370   case Instruction::Load:
6371     // Loads cause the NO_READS property to disappear.
6372     removeAssumedBits(NO_READS);
6373     return;
6374 
6375   case Instruction::Store:
6376     // Stores cause the NO_WRITES property to disappear if the use is the
6377     // pointer operand. Note that we do assume that capturing was taken care of
6378     // somewhere else.
6379     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6380       removeAssumedBits(NO_WRITES);
6381     return;
6382 
6383   case Instruction::Call:
6384   case Instruction::CallBr:
6385   case Instruction::Invoke: {
6386     // For call sites we look at the argument memory behavior attribute (this
6387     // could be recursive!) in order to restrict our own state.
6388     const auto *CB = cast<CallBase>(UserI);
6389 
6390     // Give up on operand bundles.
6391     if (CB->isBundleOperand(U)) {
6392       indicatePessimisticFixpoint();
6393       return;
6394     }
6395 
    // Calling a function does read the function pointer, and may even write
    // it if the function is self-modifying.
6398     if (CB->isCallee(U)) {
6399       removeAssumedBits(NO_READS);
6400       break;
6401     }
6402 
6403     // Adjust the possible access behavior based on the information on the
6404     // argument.
6405     IRPosition Pos;
6406     if (U->get()->getType()->isPointerTy())
6407       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6408     else
6409       Pos = IRPosition::callsite_function(*CB);
6410     const auto &MemBehaviorAA =
6411         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6412     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6413     // and at least "known".
6414     intersectAssumedBits(MemBehaviorAA.getAssumed());
6415     return;
6416   }
6417   };
6418 
6419   // Generally, look at the "may-properties" and adjust the assumed state if we
6420   // did not trigger special handling before.
6421   if (UserI->mayReadFromMemory())
6422     removeAssumedBits(NO_READS);
6423   if (UserI->mayWriteToMemory())
6424     removeAssumedBits(NO_WRITES);
6425 }
6426 
6427 } // namespace
6428 
6429 /// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly, and
/// inaccessiblemem_or_argmemonly.
6432 /// ----------------------------------------------------------------------------
6433 
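/// Pretty print the locations that may still be accessed. Note the inverted
/// encoding: a location is listed iff its "NO_*" bit is *not* set, so a
/// state where only NO_ARGUMENT_MEM is cleared prints as "memory:argument".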
6434 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6435     AAMemoryLocation::MemoryLocationsKind MLK) {
6436   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6437     return "all memory";
6438   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6439     return "no memory";
6440   std::string S = "memory:";
6441   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6442     S += "stack,";
6443   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6444     S += "constant,";
6445   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6446     S += "internal global,";
6447   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6448     S += "external global,";
6449   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6450     S += "argument,";
6451   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6452     S += "inaccessible,";
6453   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6454     S += "malloced,";
6455   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6456     S += "unknown,";
6457   S.pop_back();
6458   return S;
6459 }
6460 
6461 namespace {
6462 struct AAMemoryLocationImpl : public AAMemoryLocation {
6463 
6464   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6465       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6466     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6467       AccessKind2Accesses[u] = nullptr;
6468   }
6469 
6470   ~AAMemoryLocationImpl() {
6471     // The AccessSets are allocated via a BumpPtrAllocator, we call
6472     // the destructor manually.
6473     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6474       if (AccessKind2Accesses[u])
6475         AccessKind2Accesses[u]->~AccessSet();
6476   }
6477 
6478   /// See AbstractAttribute::initialize(...).
6479   void initialize(Attributor &A) override {
6480     intersectAssumedBits(BEST_STATE);
6481     getKnownStateFromValue(A, getIRPosition(), getState());
6482     AAMemoryLocation::initialize(A);
6483   }
6484 
6485   /// Return the memory behavior information encoded in the IR for \p IRP.
6486   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6487                                      BitIntegerState &State,
6488                                      bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblemem_or_argmemonly` as we might break them via
    // interprocedural constant propagation. It is unclear if this is the best
    // way but it is unlikely this will cause real performance problems. If we
    // are deriving attributes for the anchor function we even remove the
    // attribute in addition to ignoring it.
6495     bool UseArgMemOnly = true;
6496     Function *AnchorFn = IRP.getAnchorScope();
6497     if (AnchorFn && A.isRunOn(*AnchorFn))
6498       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6499 
6500     SmallVector<Attribute, 2> Attrs;
6501     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6502     for (const Attribute &Attr : Attrs) {
6503       switch (Attr.getKindAsEnum()) {
6504       case Attribute::ReadNone:
6505         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6506         break;
6507       case Attribute::InaccessibleMemOnly:
6508         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6509         break;
6510       case Attribute::ArgMemOnly:
6511         if (UseArgMemOnly)
6512           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6513         else
6514           IRP.removeAttrs({Attribute::ArgMemOnly});
6515         break;
6516       case Attribute::InaccessibleMemOrArgMemOnly:
6517         if (UseArgMemOnly)
6518           State.addKnownBits(inverseLocation(
6519               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6520         else
6521           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6522         break;
6523       default:
6524         llvm_unreachable("Unexpected attribute!");
6525       }
6526     }
6527   }
6528 
6529   /// See AbstractAttribute::getDeducedAttributes(...).
6530   void getDeducedAttributes(LLVMContext &Ctx,
6531                             SmallVectorImpl<Attribute> &Attrs) const override {
6532     assert(Attrs.size() == 0);
6533     if (isAssumedReadNone()) {
6534       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6535     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6536       if (isAssumedInaccessibleMemOnly())
6537         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6538       else if (isAssumedArgMemOnly())
6539         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6540       else if (isAssumedInaccessibleOrArgMemOnly())
6541         Attrs.push_back(
6542             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6543     }
6544     assert(Attrs.size() <= 1);
6545   }
6546 
6547   /// See AbstractAttribute::manifest(...).
6548   ChangeStatus manifest(Attributor &A) override {
6549     const IRPosition &IRP = getIRPosition();
6550 
6551     // Check if we would improve the existing attributes first.
6552     SmallVector<Attribute, 4> DeducedAttrs;
6553     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6554     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6555           return IRP.hasAttr(Attr.getKindAsEnum(),
6556                              /* IgnoreSubsumingPositions */ true);
6557         }))
6558       return ChangeStatus::UNCHANGED;
6559 
6560     // Clear existing attributes.
6561     IRP.removeAttrs(AttrKinds);
6562     if (isAssumedReadNone())
6563       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6564 
6565     // Use the generic manifest method.
6566     return IRAttribute::manifest(A);
6567   }
6568 
6569   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6570   bool checkForAllAccessesToMemoryKind(
6571       function_ref<bool(const Instruction *, const Value *, AccessKind,
6572                         MemoryLocationsKind)>
6573           Pred,
6574       MemoryLocationsKind RequestedMLK) const override {
6575     if (!isValidState())
6576       return false;
6577 
6578     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6579     if (AssumedMLK == NO_LOCATIONS)
6580       return true;
6581 
6582     unsigned Idx = 0;
6583     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6584          CurMLK *= 2, ++Idx) {
6585       if (CurMLK & RequestedMLK)
6586         continue;
6587 
6588       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6589         for (const AccessInfo &AI : *Accesses)
6590           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6591             return false;
6592     }
6593 
6594     return true;
6595   }
6596 
6597   ChangeStatus indicatePessimisticFixpoint() override {
6598     // If we give up and indicate a pessimistic fixpoint this instruction will
6599     // become an access for all potential access kinds:
6600     // TODO: Add pointers for argmemonly and globals to improve the results of
6601     //       checkForAllAccessesToMemoryKind.
6602     bool Changed = false;
6603     MemoryLocationsKind KnownMLK = getKnown();
6604     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6605     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6606       if (!(CurMLK & KnownMLK))
6607         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6608                                   getAccessKindFromInst(I));
6609     return AAMemoryLocation::indicatePessimisticFixpoint();
6610   }
6611 
6612 protected:
6613   /// Helper struct to tie together an instruction that has a read or write
6614   /// effect with the pointer it accesses (if any).
6615   struct AccessInfo {
6616 
6617     /// The instruction that caused the access.
6618     const Instruction *I;
6619 
6620     /// The base pointer that is accessed, or null if unknown.
6621     const Value *Ptr;
6622 
6623     /// The kind of access (read/write/read+write).
6624     AccessKind Kind;
6625 
6626     bool operator==(const AccessInfo &RHS) const {
6627       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6628     }
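    /// Strict weak ordering over accesses; used as the comparator of the
    /// sorted `AccessSet` below.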
6629     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6630       if (LHS.I != RHS.I)
6631         return LHS.I < RHS.I;
6632       if (LHS.Ptr != RHS.Ptr)
6633         return LHS.Ptr < RHS.Ptr;
6634       if (LHS.Kind != RHS.Kind)
6635         return LHS.Kind < RHS.Kind;
6636       return false;
6637     }
6638   };
6639 
  /// Mapping from *single* memory location kinds, e.g., local memory as
  /// identified by the bit NO_LOCAL_MEM, to the accesses encountered for
  /// that memory kind.
6642   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6643   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6644 
  /// Categorize the pointer arguments of \p CB that might access memory in
  /// \p AccessedLocs and update the state and access map accordingly.
6647   void
6648   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6649                                      AAMemoryLocation::StateType &AccessedLocs,
6650                                      bool &Changed);
6651 
  /// Return the kind(s) of location that may be accessed by \p I.
6653   AAMemoryLocation::MemoryLocationsKind
6654   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6655 
6656   /// Return the access kind as determined by \p I.
6657   AccessKind getAccessKindFromInst(const Instruction *I) {
6658     AccessKind AK = READ_WRITE;
6659     if (I) {
6660       AK = I->mayReadFromMemory() ? READ : NONE;
6661       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6662     }
6663     return AK;
6664   }
6665 
6666   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6667   /// an access of kind \p AK to a \p MLK memory location with the access
6668   /// pointer \p Ptr.
6669   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6670                                  MemoryLocationsKind MLK, const Instruction *I,
6671                                  const Value *Ptr, bool &Changed,
6672                                  AccessKind AK = READ_WRITE) {
6673 
6674     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6675     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6676     if (!Accesses)
6677       Accesses = new (Allocator) AccessSet();
6678     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6679     State.removeAssumedBits(MLK);
6680   }
6681 
6682   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6683   /// arguments, and update the state and access map accordingly.
6684   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6685                           AAMemoryLocation::StateType &State, bool &Changed);
6686 
6687   /// Used to allocate access sets.
6688   BumpPtrAllocator &Allocator;
6689 
6690   /// The set of IR attributes AAMemoryLocation deals with.
6691   static const Attribute::AttrKind AttrKinds[4];
6692 };
6693 
6694 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6695     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6696     Attribute::InaccessibleMemOrArgMemOnly};
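
// For illustration, manifesting these corresponds to IR-level function
// attributes, e.g., a function that only accesses memory through its pointer
// arguments can be marked (body elided):
//   define void @f(i32* %p) argmemonly { ... }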
6697 
6698 void AAMemoryLocationImpl::categorizePtrValue(
6699     Attributor &A, const Instruction &I, const Value &Ptr,
6700     AAMemoryLocation::StateType &State, bool &Changed) {
6701   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6702                     << Ptr << " ["
6703                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6704 
6705   auto StripGEPCB = [](Value *V) -> Value * {
6706     auto *GEP = dyn_cast<GEPOperator>(V);
6707     while (GEP) {
6708       V = GEP->getPointerOperand();
6709       GEP = dyn_cast<GEPOperator>(V);
6710     }
6711     return V;
6712   };
6713 
6714   auto VisitValueCB = [&](Value &V, const Instruction *,
6715                           AAMemoryLocation::StateType &T,
6716                           bool Stripped) -> bool {
6717     // TODO: recognize the TBAA used for constant accesses.
6718     MemoryLocationsKind MLK = NO_LOCATIONS;
6719     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6720     if (isa<UndefValue>(V))
6721       return true;
6722     if (auto *Arg = dyn_cast<Argument>(&V)) {
6723       if (Arg->hasByValAttr())
6724         MLK = NO_LOCAL_MEM;
6725       else
6726         MLK = NO_ARGUMENT_MEM;
6727     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attr pass, so we do not treat it as one either. Constants
      // defined by TBAA are similar. (We know we do not write it because it
      // is constant.)
6731       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6732         if (GVar->isConstant())
6733           return true;
6734 
6735       if (GV->hasLocalLinkage())
6736         MLK = NO_GLOBAL_INTERNAL_MEM;
6737       else
6738         MLK = NO_GLOBAL_EXTERNAL_MEM;
6739     } else if (isa<ConstantPointerNull>(V) &&
6740                !NullPointerIsDefined(getAssociatedFunction(),
6741                                      V.getType()->getPointerAddressSpace())) {
6742       return true;
6743     } else if (isa<AllocaInst>(V)) {
6744       MLK = NO_LOCAL_MEM;
6745     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6746       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6747           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6748       if (NoAliasAA.isAssumedNoAlias())
6749         MLK = NO_MALLOCED_MEM;
6750       else
6751         MLK = NO_UNKOWN_MEM;
6752     } else {
6753       MLK = NO_UNKOWN_MEM;
6754     }
6755 
6756     assert(MLK != NO_LOCATIONS && "No location specified!");
6757     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6758                               getAccessKindFromInst(&I));
6759     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6760                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6761                       << "\n");
6762     return true;
6763   };
6764 
6765   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6766           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6767           /* UseValueSimplify */ true,
6768           /* MaxValues */ 32, StripGEPCB)) {
6769     LLVM_DEBUG(
6770         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6771     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6772                               getAccessKindFromInst(&I));
6773   } else {
6774     LLVM_DEBUG(
6775         dbgs()
6776         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6777         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6778   }
6779 }
6780 
6781 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6782     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6783     bool &Changed) {
6784   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6785 
6786     // Skip non-pointer arguments.
6787     const Value *ArgOp = CB.getArgOperand(ArgNo);
6788     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6789       continue;
6790 
6791     // Skip readnone arguments.
6792     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6793     const auto &ArgOpMemLocationAA =
6794         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6795 
6796     if (ArgOpMemLocationAA.isAssumedReadNone())
6797       continue;
6798 
    // Categorize potentially accessed pointer arguments as if there were an
    // access instruction with them as the pointer operand.
6801     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6802   }
6803 }
6804 
6805 AAMemoryLocation::MemoryLocationsKind
6806 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6807                                                   bool &Changed) {
6808   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6809                     << I << "\n");
6810 
6811   AAMemoryLocation::StateType AccessedLocs;
6812   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6813 
6814   if (auto *CB = dyn_cast<CallBase>(&I)) {
6815 
    // First check if we assume any memory access is visible.
6817     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6818         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6819     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6820                       << " [" << CBMemLocationAA << "]\n");
6821 
6822     if (CBMemLocationAA.isAssumedReadNone())
6823       return NO_LOCATIONS;
6824 
6825     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6826       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6827                                 Changed, getAccessKindFromInst(&I));
6828       return AccessedLocs.getAssumed();
6829     }
6830 
6831     uint32_t CBAssumedNotAccessedLocs =
6832         CBMemLocationAA.getAssumedNotAccessedLocation();
6833 
    // Set the argmemonly and global bits as we handle them separately below.
6835     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6836         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6837 
6838     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6839       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6840         continue;
6841       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6842                                 getAccessKindFromInst(&I));
6843     }
6844 
6845     // Now handle global memory if it might be accessed. This is slightly tricky
6846     // as NO_GLOBAL_MEM has multiple bits set.
6847     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6848     if (HasGlobalAccesses) {
6849       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6850                             AccessKind Kind, MemoryLocationsKind MLK) {
6851         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6852                                   getAccessKindFromInst(&I));
6853         return true;
6854       };
6855       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6856               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6857         return AccessedLocs.getWorstState();
6858     }
6859 
6860     LLVM_DEBUG(
6861         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6862                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6863 
6864     // Now handle argument memory if it might be accessed.
6865     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6866     if (HasArgAccesses)
6867       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6868 
6869     LLVM_DEBUG(
6870         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6871                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6872 
6873     return AccessedLocs.getAssumed();
6874   }
6875 
6876   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6877     LLVM_DEBUG(
6878         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6879                << I << " [" << *Ptr << "]\n");
6880     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6881     return AccessedLocs.getAssumed();
6882   }
6883 
6884   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6885                     << I << "\n");
6886   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6887                             getAccessKindFromInst(&I));
6888   return AccessedLocs.getAssumed();
6889 }
6890 
6891 /// An AA to represent the memory behavior function attributes.
6892 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6893   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6894       : AAMemoryLocationImpl(IRP, A) {}
6895 
6896   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
6898 
6899     const auto &MemBehaviorAA =
6900         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6901     if (MemBehaviorAA.isAssumedReadNone()) {
6902       if (MemBehaviorAA.isKnownReadNone())
6903         return indicateOptimisticFixpoint();
6904       assert(isAssumedReadNone() &&
6905              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6906       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6907       return ChangeStatus::UNCHANGED;
6908     }
6909 
6910     // The current assumed state used to determine a change.
6911     auto AssumedState = getAssumed();
6912     bool Changed = false;
6913 
6914     auto CheckRWInst = [&](Instruction &I) {
6915       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6916       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6917                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6918       removeAssumedBits(inverseLocation(MLK, false, false));
      // Stop once only the valid bit is set in the *not assumed location*,
      // i.e., once we do not actually exclude any memory locations in the
      // state anymore.
6921       return getAssumedNotAccessedLocation() != VALID_STATE;
6922     };
6923 
6924     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6925       return indicatePessimisticFixpoint();
6926 
6927     Changed |= AssumedState != getAssumed();
6928     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6929   }
6930 
6931   /// See AbstractAttribute::trackStatistics()
6932   void trackStatistics() const override {
6933     if (isAssumedReadNone())
6934       STATS_DECLTRACK_FN_ATTR(readnone)
6935     else if (isAssumedArgMemOnly())
6936       STATS_DECLTRACK_FN_ATTR(argmemonly)
6937     else if (isAssumedInaccessibleMemOnly())
6938       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6939     else if (isAssumedInaccessibleOrArgMemOnly())
6940       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6941   }
6942 };
6943 
6944 /// AAMemoryLocation attribute for call sites.
6945 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6946   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6947       : AAMemoryLocationImpl(IRP, A) {}
6948 
6949   /// See AbstractAttribute::initialize(...).
6950   void initialize(Attributor &A) override {
6951     AAMemoryLocationImpl::initialize(A);
6952     Function *F = getAssociatedFunction();
6953     if (!F || F->isDeclaration())
6954       indicatePessimisticFixpoint();
6955   }
6956 
6957   /// See AbstractAttribute::updateImpl(...).
6958   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6963     Function *F = getAssociatedFunction();
6964     const IRPosition &FnPos = IRPosition::function(*F);
6965     auto &FnAA =
6966         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
6967     bool Changed = false;
6968     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6969                           AccessKind Kind, MemoryLocationsKind MLK) {
6970       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6971                                 getAccessKindFromInst(I));
6972       return true;
6973     };
6974     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6975       return indicatePessimisticFixpoint();
6976     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6977   }
6978 
6979   /// See AbstractAttribute::trackStatistics()
6980   void trackStatistics() const override {
6981     if (isAssumedReadNone())
6982       STATS_DECLTRACK_CS_ATTR(readnone)
6983   }
6984 };
6985 
6986 /// ------------------ Value Constant Range Attribute -------------------------
6987 
6988 struct AAValueConstantRangeImpl : AAValueConstantRange {
6989   using StateType = IntegerRangeState;
6990   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6991       : AAValueConstantRange(IRP, A) {}
6992 
6993   /// See AbstractAttribute::getAsStr().
6994   const std::string getAsStr() const override {
6995     std::string Str;
6996     llvm::raw_string_ostream OS(Str);
6997     OS << "range(" << getBitWidth() << ")<";
6998     getKnown().print(OS);
6999     OS << " / ";
7000     getAssumed().print(OS);
7001     OS << ">";
7002     return OS.str();
7003   }
7004 
7005   /// Helper function to get a SCEV expr for the associated value at program
7006   /// point \p I.
7007   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7008     if (!getAnchorScope())
7009       return nullptr;
7010 
7011     ScalarEvolution *SE =
7012         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7013             *getAnchorScope());
7014 
7015     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7016         *getAnchorScope());
7017 
7018     if (!SE || !LI)
7019       return nullptr;
7020 
7021     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7022     if (!I)
7023       return S;
7024 
7025     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7026   }
7027 
7028   /// Helper function to get a range from SCEV for the associated value at
7029   /// program point \p I.
7030   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7031                                          const Instruction *I = nullptr) const {
7032     if (!getAnchorScope())
7033       return getWorstState(getBitWidth());
7034 
7035     ScalarEvolution *SE =
7036         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7037             *getAnchorScope());
7038 
7039     const SCEV *S = getSCEV(A, I);
7040     if (!SE || !S)
7041       return getWorstState(getBitWidth());
7042 
7043     return SE->getUnsignedRange(S);
7044   }
7045 
7046   /// Helper function to get a range from LVI for the associated value at
7047   /// program point \p I.
7048   ConstantRange
7049   getConstantRangeFromLVI(Attributor &A,
7050                           const Instruction *CtxI = nullptr) const {
7051     if (!getAnchorScope())
7052       return getWorstState(getBitWidth());
7053 
7054     LazyValueInfo *LVI =
7055         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7056             *getAnchorScope());
7057 
7058     if (!LVI || !CtxI)
7059       return getWorstState(getBitWidth());
7060     return LVI->getConstantRange(&getAssociatedValue(),
7061                                  const_cast<Instruction *>(CtxI));
7062   }
7063 
7064   /// See AAValueConstantRange::getKnownConstantRange(..).
7065   ConstantRange
7066   getKnownConstantRange(Attributor &A,
7067                         const Instruction *CtxI = nullptr) const override {
7068     if (!CtxI || CtxI == getCtxI())
7069       return getKnown();
7070 
7071     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7072     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7073     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7074   }
7075 
7076   /// See AAValueConstantRange::getAssumedConstantRange(..).
7077   ConstantRange
7078   getAssumedConstantRange(Attributor &A,
7079                           const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumptions.
    //       We may be able to bound a variable range via assumptions in
    //       Attributor, e.g., if x is assumed to be in [1, 3] and y is known
    //       to evolve to x^2 + x, then we can say that y is in [2, 12].
7084 
7085     if (!CtxI || CtxI == getCtxI())
7086       return getAssumed();
7087 
7088     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7089     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7090     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7091   }
7092 
7093   /// See AbstractAttribute::initialize(..).
7094   void initialize(Attributor &A) override {
7095     // Intersect a range given by SCEV.
7096     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7097 
7098     // Intersect a range given by LVI.
7099     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7100   }
7101 
7102   /// Helper function to create MDNode for range metadata.
7103   static MDNode *
7104   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7105                             const ConstantRange &AssumedConstantRange) {
7106     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7107                                   Ty, AssumedConstantRange.getLower())),
7108                               ConstantAsMetadata::get(ConstantInt::get(
7109                                   Ty, AssumedConstantRange.getUpper()))};
7110     return MDNode::get(Ctx, LowAndHigh);
7111   }
7112 
  /// Return true if \p Assumed is a strictly better range than the one
  /// encoded in \p KnownRanges (or if there is no known range yet).
7114   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7115 
7116     if (Assumed.isFullSet())
7117       return false;
7118 
7119     if (!KnownRanges)
7120       return true;
7121 
    // If multiple ranges are annotated in the IR, we give up annotating the
    // assumed range for now.

    // TODO: If there exists a known range which contains the assumed range,
    // we can say the assumed range is better.
7127     if (KnownRanges->getNumOperands() > 2)
7128       return false;
7129 
7130     ConstantInt *Lower =
7131         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7132     ConstantInt *Upper =
7133         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7134 
7135     ConstantRange Known(Lower->getValue(), Upper->getValue());
7136     return Known.contains(Assumed) && Known != Assumed;
7137   }
7138 
7139   /// Helper function to set range metadata.
7140   static bool
7141   setRangeMetadataIfisBetterRange(Instruction *I,
7142                                   const ConstantRange &AssumedConstantRange) {
7143     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7144     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7145       if (!AssumedConstantRange.isEmptySet()) {
7146         I->setMetadata(LLVMContext::MD_range,
7147                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7148                                                  AssumedConstantRange));
7149         return true;
7150       }
7151     }
7152     return false;
7153   }
7154 
7155   /// See AbstractAttribute::manifest()
7156   ChangeStatus manifest(Attributor &A) override {
7157     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7158     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7159     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7160 
7161     auto &V = getAssociatedValue();
7162     if (!AssumedConstantRange.isEmptySet() &&
7163         !AssumedConstantRange.isSingleElement()) {
7164       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7165         assert(I == getCtxI() && "Should not annotate an instruction which is "
7166                                  "not the context instruction");
7167         if (isa<CallInst>(I) || isa<LoadInst>(I))
7168           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7169             Changed = ChangeStatus::CHANGED;
7170       }
7171     }
7172 
7173     return Changed;
7174   }
7175 };
7176 
7177 struct AAValueConstantRangeArgument final
7178     : AAArgumentFromCallSiteArguments<
7179           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7180           true /* BridgeCallBaseContext */> {
7181   using Base = AAArgumentFromCallSiteArguments<
7182       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7183       true /* BridgeCallBaseContext */>;
7184   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7185       : Base(IRP, A) {}
7186 
7187   /// See AbstractAttribute::initialize(..).
7188   void initialize(Attributor &A) override {
7189     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7190       indicatePessimisticFixpoint();
7191     } else {
7192       Base::initialize(A);
7193     }
7194   }
7195 
7196   /// See AbstractAttribute::trackStatistics()
7197   void trackStatistics() const override {
7198     STATS_DECLTRACK_ARG_ATTR(value_range)
7199   }
7200 };
7201 
7202 struct AAValueConstantRangeReturned
7203     : AAReturnedFromReturnedValues<AAValueConstantRange,
7204                                    AAValueConstantRangeImpl,
7205                                    AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true> {
7207   using Base =
7208       AAReturnedFromReturnedValues<AAValueConstantRange,
7209                                    AAValueConstantRangeImpl,
7210                                    AAValueConstantRangeImpl::StateType,
                                   /* PropagateCallBaseContext */ true>;
7212   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7213       : Base(IRP, A) {}
7214 
7215   /// See AbstractAttribute::initialize(...).
7216   void initialize(Attributor &A) override {}
7217 
7218   /// See AbstractAttribute::trackStatistics()
7219   void trackStatistics() const override {
7220     STATS_DECLTRACK_FNRET_ATTR(value_range)
7221   }
7222 };
7223 
7224 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7225   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7226       : AAValueConstantRangeImpl(IRP, A) {}
7227 
7228   /// See AbstractAttribute::initialize(...).
7229   void initialize(Attributor &A) override {
7230     AAValueConstantRangeImpl::initialize(A);
7231     Value &V = getAssociatedValue();
7232 
7233     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7234       unionAssumed(ConstantRange(C->getValue()));
7235       indicateOptimisticFixpoint();
7236       return;
7237     }
7238 
7239     if (isa<UndefValue>(&V)) {
7240       // Collapse the undef state to 0.
7241       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7242       indicateOptimisticFixpoint();
7243       return;
7244     }
7245 
7246     if (isa<CallBase>(&V))
7247       return;
7248 
7249     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7250       return;
7251     // If it is a load instruction with range metadata, use it.
7252     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7253       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7254         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7255         return;
7256       }
7257 
    // We can work with PHI and select instructions as we traverse their
    // operands during the update.
7260     if (isa<SelectInst>(V) || isa<PHINode>(V))
7261       return;
7262 
7263     // Otherwise we give up.
7264     indicatePessimisticFixpoint();
7265 
7266     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7267                       << getAssociatedValue() << "\n");
7268   }
7269 
7270   bool calculateBinaryOperator(
7271       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7272       const Instruction *CtxI,
7273       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7274     Value *LHS = BinOp->getOperand(0);
7275     Value *RHS = BinOp->getOperand(1);
    // TODO: Allow non-integers as well.
7277     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7278       return false;
7279 
7280     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7281         *this, IRPosition::value(*LHS, getCallBaseContext()),
7282         DepClassTy::REQUIRED);
7283     QuerriedAAs.push_back(&LHSAA);
7284     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7285 
7286     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7287         *this, IRPosition::value(*RHS, getCallBaseContext()),
7288         DepClassTy::REQUIRED);
7289     QuerriedAAs.push_back(&RHSAA);
7290     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7291 
7292     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7293 
7294     T.unionAssumed(AssumedRange);
7295 
7296     // TODO: Track a known state too.
7297 
7298     return T.isValidState();
7299   }
7300 
7301   bool calculateCastInst(
7302       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7303       const Instruction *CtxI,
7304       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7305     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
    // TODO: Allow non-integers as well.
7307     Value &OpV = *CastI->getOperand(0);
7308     if (!OpV.getType()->isIntegerTy())
7309       return false;
7310 
7311     auto &OpAA = A.getAAFor<AAValueConstantRange>(
7312         *this, IRPosition::value(OpV, getCallBaseContext()),
7313         DepClassTy::REQUIRED);
7314     QuerriedAAs.push_back(&OpAA);
7315     T.unionAssumed(
7316         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7317     return T.isValidState();
7318   }
7319 
7320   bool
7321   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7322                    const Instruction *CtxI,
7323                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7324     Value *LHS = CmpI->getOperand(0);
7325     Value *RHS = CmpI->getOperand(1);
    // TODO: Allow non-integers as well.
7327     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7328       return false;
7329 
7330     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7331         *this, IRPosition::value(*LHS, getCallBaseContext()),
7332         DepClassTy::REQUIRED);
7333     QuerriedAAs.push_back(&LHSAA);
    auto &RHSAA = A.getAAFor<AAValueConstantRange>(
        *this, IRPosition::value(*RHS, getCallBaseContext()),
        DepClassTy::REQUIRED);
    QuerriedAAs.push_back(&RHSAA);
7337     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7338     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7339 
    // If one of them is the empty set, we can't decide.
7341     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7342       return true;
7343 
7344     bool MustTrue = false, MustFalse = false;
7345 
7346     auto AllowedRegion =
7347         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7348 
7349     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7350       MustFalse = true;
7351 
7352     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
7353       MustTrue = true;
7354 
7355     assert((!MustTrue || !MustFalse) &&
7356            "Either MustTrue or MustFalse should be false!");
7357 
7358     if (MustTrue)
7359       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7360     else if (MustFalse)
7361       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7362     else
7363       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7364 
7365     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7366                       << " " << RHSAA << "\n");
7367 
7368     // TODO: Track a known state too.
7369     return T.isValidState();
7370   }
7371 
7372   /// See AbstractAttribute::updateImpl(...).
7373   ChangeStatus updateImpl(Attributor &A) override {
7374     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7375                             IntegerRangeState &T, bool Stripped) -> bool {
7376       Instruction *I = dyn_cast<Instruction>(&V);
7377       if (!I || isa<CallBase>(I)) {
7378 
        // If the value is not an instruction handled below (or is a call),
        // we query the Attributor for an AA of the value itself.
7380         const auto &AA = A.getAAFor<AAValueConstantRange>(
7381             *this, IRPosition::value(V, getCallBaseContext()),
7382             DepClassTy::REQUIRED);
7383 
        // The clamp operator is not used here so that the program point CtxI
        // can be utilized when querying the constant range.
7385         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7386 
7387         return T.isValidState();
7388       }
7389 
7390       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7391       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7392         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7393           return false;
7394       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7395         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7396           return false;
7397       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7398         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7399           return false;
7400       } else {
7401         // Give up with other instructions.
7402         // TODO: Add other instructions
7403 
7404         T.indicatePessimisticFixpoint();
7405         return false;
7406       }
7407 
7408       // Catch circular reasoning in a pessimistic way for now.
7409       // TODO: Check how the range evolves and if we stripped anything, see also
7410       //       AADereferenceable or AAAlign for similar situations.
7411       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7412         if (QueriedAA != this)
7413           continue;
        // If we are in a steady state we do not need to worry.
7415         if (T.getAssumed() == getState().getAssumed())
7416           continue;
7417         T.indicatePessimisticFixpoint();
7418       }
7419 
7420       return T.isValidState();
7421     };
7422 
7423     IntegerRangeState T(getBitWidth());
7424 
7425     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7426             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7427             /* UseValueSimplify */ false))
7428       return indicatePessimisticFixpoint();
7429 
7430     return clampStateAndIndicateChange(getState(), T);
7431   }
7432 
7433   /// See AbstractAttribute::trackStatistics()
7434   void trackStatistics() const override {
7435     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7436   }
7437 };
7438 
7439 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7440   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7441       : AAValueConstantRangeImpl(IRP, A) {}
7442 
  /// See AbstractAttribute::updateImpl(...).
7444   ChangeStatus updateImpl(Attributor &A) override {
7445     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7446                      "not be called");
7447   }
7448 
7449   /// See AbstractAttribute::trackStatistics()
7450   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7451 };
7452 
7453 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7454   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7455       : AAValueConstantRangeFunction(IRP, A) {}
7456 
7457   /// See AbstractAttribute::trackStatistics()
7458   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7459 };
7460 
7461 struct AAValueConstantRangeCallSiteReturned
7462     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7463                                      AAValueConstantRangeImpl,
7464                                      AAValueConstantRangeImpl::StateType,
7465                                      /* IntroduceCallBaseContext */ true> {
7466   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7467       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7468                                        AAValueConstantRangeImpl,
7469                                        AAValueConstantRangeImpl::StateType,
7470                                        /* IntroduceCallBaseContext */ true>(IRP,
7471                                                                             A) {
7472   }
7473 
7474   /// See AbstractAttribute::initialize(...).
7475   void initialize(Attributor &A) override {
    // If it is a call instruction with range metadata, use the metadata.
7477     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7478       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7479         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7480 
7481     AAValueConstantRangeImpl::initialize(A);
7482   }
7483 
7484   /// See AbstractAttribute::trackStatistics()
7485   void trackStatistics() const override {
7486     STATS_DECLTRACK_CSRET_ATTR(value_range)
7487   }
7488 };
7489 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7490   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7491       : AAValueConstantRangeFloating(IRP, A) {}
7492 
7493   /// See AbstractAttribute::manifest()
7494   ChangeStatus manifest(Attributor &A) override {
7495     return ChangeStatus::UNCHANGED;
7496   }
7497 
7498   /// See AbstractAttribute::trackStatistics()
7499   void trackStatistics() const override {
7500     STATS_DECLTRACK_CSARG_ATTR(value_range)
7501   }
7502 };
7503 
7504 /// ------------------ Potential Values Attribute -------------------------
7505 
7506 struct AAPotentialValuesImpl : AAPotentialValues {
7507   using StateType = PotentialConstantIntValuesState;
7508 
7509   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7510       : AAPotentialValues(IRP, A) {}
7511 
7512   /// See AbstractAttribute::getAsStr().
7513   const std::string getAsStr() const override {
7514     std::string Str;
7515     llvm::raw_string_ostream OS(Str);
7516     OS << getState();
7517     return OS.str();
7518   }
7519 
7520   /// See AbstractAttribute::updateImpl(...).
7521   ChangeStatus updateImpl(Attributor &A) override {
7522     return indicatePessimisticFixpoint();
7523   }
7524 };
7525 
7526 struct AAPotentialValuesArgument final
7527     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7528                                       PotentialConstantIntValuesState> {
7529   using Base =
7530       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7531                                       PotentialConstantIntValuesState>;
7532   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7533       : Base(IRP, A) {}
7534 
7535   /// See AbstractAttribute::initialize(..).
7536   void initialize(Attributor &A) override {
7537     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7538       indicatePessimisticFixpoint();
7539     } else {
7540       Base::initialize(A);
7541     }
7542   }
7543 
7544   /// See AbstractAttribute::trackStatistics()
7545   void trackStatistics() const override {
7546     STATS_DECLTRACK_ARG_ATTR(potential_values)
7547   }
7548 };
7549 
7550 struct AAPotentialValuesReturned
7551     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7552   using Base =
7553       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7554   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7555       : Base(IRP, A) {}
7556 
7557   /// See AbstractAttribute::trackStatistics()
7558   void trackStatistics() const override {
7559     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7560   }
7561 };
7562 
7563 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7564   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7565       : AAPotentialValuesImpl(IRP, A) {}
7566 
7567   /// See AbstractAttribute::initialize(..).
7568   void initialize(Attributor &A) override {
7569     Value &V = getAssociatedValue();
7570 
7571     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7572       unionAssumed(C->getValue());
7573       indicateOptimisticFixpoint();
7574       return;
7575     }
7576 
7577     if (isa<UndefValue>(&V)) {
7578       unionAssumedWithUndef();
7579       indicateOptimisticFixpoint();
7580       return;
7581     }
7582 
7583     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7584       return;
7585 
7586     if (isa<SelectInst>(V) || isa<PHINode>(V))
7587       return;
7588 
7589     indicatePessimisticFixpoint();
7590 
7591     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7592                       << getAssociatedValue() << "\n");
7593   }
7594 
7595   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7596                                 const APInt &RHS) {
7597     ICmpInst::Predicate Pred = ICI->getPredicate();
7598     switch (Pred) {
7599     case ICmpInst::ICMP_UGT:
7600       return LHS.ugt(RHS);
7601     case ICmpInst::ICMP_SGT:
7602       return LHS.sgt(RHS);
7603     case ICmpInst::ICMP_EQ:
7604       return LHS.eq(RHS);
7605     case ICmpInst::ICMP_UGE:
7606       return LHS.uge(RHS);
7607     case ICmpInst::ICMP_SGE:
7608       return LHS.sge(RHS);
7609     case ICmpInst::ICMP_ULT:
7610       return LHS.ult(RHS);
7611     case ICmpInst::ICMP_SLT:
7612       return LHS.slt(RHS);
7613     case ICmpInst::ICMP_NE:
7614       return LHS.ne(RHS);
7615     case ICmpInst::ICMP_ULE:
7616       return LHS.ule(RHS);
7617     case ICmpInst::ICMP_SLE:
7618       return LHS.sle(RHS);
7619     default:
7620       llvm_unreachable("Invalid ICmp predicate!");
7621     }
7622   }
7623 
7624   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7625                                  uint32_t ResultBitWidth) {
7626     Instruction::CastOps CastOp = CI->getOpcode();
7627     switch (CastOp) {
7628     default:
7629       llvm_unreachable("unsupported or not integer cast");
7630     case Instruction::Trunc:
7631       return Src.trunc(ResultBitWidth);
7632     case Instruction::SExt:
7633       return Src.sext(ResultBitWidth);
7634     case Instruction::ZExt:
7635       return Src.zext(ResultBitWidth);
7636     case Instruction::BitCast:
7637       return Src;
7638     }
7639   }
7640 
7641   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7642                                        const APInt &LHS, const APInt &RHS,
7643                                        bool &SkipOperation, bool &Unsupported) {
7644     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7645     // Unsupported is set to true when the binary operator is not supported.
    // SkipOperation is set to true when UB occurs with the given operand pair
7647     // (LHS, RHS).
    // TODO: we should look at the nsw and nuw flags to handle operations
    //       that create poison or undef values.
7650     switch (BinOpcode) {
7651     default:
7652       Unsupported = true;
7653       return LHS;
7654     case Instruction::Add:
7655       return LHS + RHS;
7656     case Instruction::Sub:
7657       return LHS - RHS;
7658     case Instruction::Mul:
7659       return LHS * RHS;
7660     case Instruction::UDiv:
7661       if (RHS.isNullValue()) {
7662         SkipOperation = true;
7663         return LHS;
7664       }
7665       return LHS.udiv(RHS);
7666     case Instruction::SDiv:
7667       if (RHS.isNullValue()) {
7668         SkipOperation = true;
7669         return LHS;
7670       }
7671       return LHS.sdiv(RHS);
7672     case Instruction::URem:
7673       if (RHS.isNullValue()) {
7674         SkipOperation = true;
7675         return LHS;
7676       }
7677       return LHS.urem(RHS);
7678     case Instruction::SRem:
7679       if (RHS.isNullValue()) {
7680         SkipOperation = true;
7681         return LHS;
7682       }
7683       return LHS.srem(RHS);
7684     case Instruction::Shl:
7685       return LHS.shl(RHS);
7686     case Instruction::LShr:
7687       return LHS.lshr(RHS);
7688     case Instruction::AShr:
7689       return LHS.ashr(RHS);
7690     case Instruction::And:
7691       return LHS & RHS;
7692     case Instruction::Or:
7693       return LHS | RHS;
7694     case Instruction::Xor:
7695       return LHS ^ RHS;
7696     }
7697   }
7698 
7699   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7700                                            const APInt &LHS, const APInt &RHS) {
7701     bool SkipOperation = false;
7702     bool Unsupported = false;
7703     APInt Result =
7704         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7705     if (Unsupported)
7706       return false;
7707     // If SkipOperation is true, we can ignore this operand pair (L, R).
7708     if (!SkipOperation)
7709       unionAssumed(Result);
7710     return isValidState();
7711   }
7712 
7713   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7714     auto AssumedBefore = getAssumed();
7715     Value *LHS = ICI->getOperand(0);
7716     Value *RHS = ICI->getOperand(1);
7717     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7718       return indicatePessimisticFixpoint();
7719 
7720     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7721                                                 DepClassTy::REQUIRED);
7722     if (!LHSAA.isValidState())
7723       return indicatePessimisticFixpoint();
7724 
7725     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7726                                                 DepClassTy::REQUIRED);
7727     if (!RHSAA.isValidState())
7728       return indicatePessimisticFixpoint();
7729 
7730     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7731     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7732 
7733     // TODO: make use of undef flag to limit potential values aggressively.
7734     bool MaybeTrue = false, MaybeFalse = false;
7735     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7736     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7737       // The result of any comparison between undefs can be soundly replaced
7738       // with undef.
7739       unionAssumedWithUndef();
7740     } else if (LHSAA.undefIsContained()) {
7741       for (const APInt &R : RHSAAPVS) {
7742         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7743         MaybeTrue |= CmpResult;
7744         MaybeFalse |= !CmpResult;
7745         if (MaybeTrue & MaybeFalse)
7746           return indicatePessimisticFixpoint();
7747       }
7748     } else if (RHSAA.undefIsContained()) {
7749       for (const APInt &L : LHSAAPVS) {
7750         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7751         MaybeTrue |= CmpResult;
7752         MaybeFalse |= !CmpResult;
7753         if (MaybeTrue & MaybeFalse)
7754           return indicatePessimisticFixpoint();
7755       }
7756     } else {
7757       for (const APInt &L : LHSAAPVS) {
7758         for (const APInt &R : RHSAAPVS) {
7759           bool CmpResult = calculateICmpInst(ICI, L, R);
7760           MaybeTrue |= CmpResult;
7761           MaybeFalse |= !CmpResult;
7762           if (MaybeTrue & MaybeFalse)
7763             return indicatePessimisticFixpoint();
7764         }
7765       }
7766     }
7767     if (MaybeTrue)
7768       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7769     if (MaybeFalse)
7770       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7771     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7772                                          : ChangeStatus::CHANGED;
7773   }
7774 
7775   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7776     auto AssumedBefore = getAssumed();
7777     Value *LHS = SI->getTrueValue();
7778     Value *RHS = SI->getFalseValue();
7779     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7780       return indicatePessimisticFixpoint();
7781 
7782     // TODO: Use assumed simplified condition value
7783     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7784                                                 DepClassTy::REQUIRED);
7785     if (!LHSAA.isValidState())
7786       return indicatePessimisticFixpoint();
7787 
7788     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7789                                                 DepClassTy::REQUIRED);
7790     if (!RHSAA.isValidState())
7791       return indicatePessimisticFixpoint();
7792 
7793     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
      // select i1 *, undef, undef => undef
7795       unionAssumedWithUndef();
7796     else {
7797       unionAssumed(LHSAA);
7798       unionAssumed(RHSAA);
7799     }
7800     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7801                                          : ChangeStatus::CHANGED;
7802   }
7803 
7804   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7805     auto AssumedBefore = getAssumed();
7806     if (!CI->isIntegerCast())
7807       return indicatePessimisticFixpoint();
7808     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7809     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7810     Value *Src = CI->getOperand(0);
7811     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7812                                                 DepClassTy::REQUIRED);
7813     if (!SrcAA.isValidState())
7814       return indicatePessimisticFixpoint();
7815     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7816     if (SrcAA.undefIsContained())
7817       unionAssumedWithUndef();
7818     else {
7819       for (const APInt &S : SrcAAPVS) {
7820         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7821         unionAssumed(T);
7822       }
7823     }
7824     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7825                                          : ChangeStatus::CHANGED;
7826   }
7827 
7828   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7829     auto AssumedBefore = getAssumed();
7830     Value *LHS = BinOp->getOperand(0);
7831     Value *RHS = BinOp->getOperand(1);
7832     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7833       return indicatePessimisticFixpoint();
7834 
7835     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7836                                                 DepClassTy::REQUIRED);
7837     if (!LHSAA.isValidState())
7838       return indicatePessimisticFixpoint();
7839 
7840     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7841                                                 DepClassTy::REQUIRED);
7842     if (!RHSAA.isValidState())
7843       return indicatePessimisticFixpoint();
7844 
7845     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7846     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7847     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7848 
7849     // TODO: make use of undef flag to limit potential values aggressively.
7850     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7851       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7852         return indicatePessimisticFixpoint();
7853     } else if (LHSAA.undefIsContained()) {
7854       for (const APInt &R : RHSAAPVS) {
7855         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7856           return indicatePessimisticFixpoint();
7857       }
7858     } else if (RHSAA.undefIsContained()) {
7859       for (const APInt &L : LHSAAPVS) {
7860         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7861           return indicatePessimisticFixpoint();
7862       }
7863     } else {
7864       for (const APInt &L : LHSAAPVS) {
7865         for (const APInt &R : RHSAAPVS) {
7866           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7867             return indicatePessimisticFixpoint();
7868         }
7869       }
7870     }
7871     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7872                                          : ChangeStatus::CHANGED;
7873   }
7874 
7875   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7876     auto AssumedBefore = getAssumed();
7877     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7878       Value *IncomingValue = PHI->getIncomingValue(u);
7879       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7880           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
7881       if (!PotentialValuesAA.isValidState())
7882         return indicatePessimisticFixpoint();
7883       if (PotentialValuesAA.undefIsContained())
7884         unionAssumedWithUndef();
7885       else
7886         unionAssumed(PotentialValuesAA.getAssumed());
7887     }
7888     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7889                                          : ChangeStatus::CHANGED;
7890   }
7891 
7892   /// See AbstractAttribute::updateImpl(...).
7893   ChangeStatus updateImpl(Attributor &A) override {
7894     Value &V = getAssociatedValue();
7895     Instruction *I = dyn_cast<Instruction>(&V);
7896 
7897     if (auto *ICI = dyn_cast<ICmpInst>(I))
7898       return updateWithICmpInst(A, ICI);
7899 
7900     if (auto *SI = dyn_cast<SelectInst>(I))
7901       return updateWithSelectInst(A, SI);
7902 
7903     if (auto *CI = dyn_cast<CastInst>(I))
7904       return updateWithCastInst(A, CI);
7905 
7906     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7907       return updateWithBinaryOperator(A, BinOp);
7908 
7909     if (auto *PHI = dyn_cast<PHINode>(I))
7910       return updateWithPHINode(A, PHI);
7911 
7912     return indicatePessimisticFixpoint();
7913   }
7914 
7915   /// See AbstractAttribute::trackStatistics()
7916   void trackStatistics() const override {
7917     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7918   }
7919 };
7920 
7921 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7922   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7923       : AAPotentialValuesImpl(IRP, A) {}
7924 
  /// See AbstractAttribute::updateImpl(...).
7926   ChangeStatus updateImpl(Attributor &A) override {
7927     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7928                      "not be called");
7929   }
7930 
7931   /// See AbstractAttribute::trackStatistics()
7932   void trackStatistics() const override {
7933     STATS_DECLTRACK_FN_ATTR(potential_values)
7934   }
7935 };
7936 
7937 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7938   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7939       : AAPotentialValuesFunction(IRP, A) {}
7940 
7941   /// See AbstractAttribute::trackStatistics()
7942   void trackStatistics() const override {
7943     STATS_DECLTRACK_CS_ATTR(potential_values)
7944   }
7945 };
7946 
7947 struct AAPotentialValuesCallSiteReturned
7948     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7949   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7950       : AACallSiteReturnedFromReturned<AAPotentialValues,
7951                                        AAPotentialValuesImpl>(IRP, A) {}
7952 
7953   /// See AbstractAttribute::trackStatistics()
7954   void trackStatistics() const override {
7955     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7956   }
7957 };
7958 
7959 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7960   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7961       : AAPotentialValuesFloating(IRP, A) {}
7962 
7963   /// See AbstractAttribute::initialize(..).
7964   void initialize(Attributor &A) override {
7965     Value &V = getAssociatedValue();
7966 
7967     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7968       unionAssumed(C->getValue());
7969       indicateOptimisticFixpoint();
7970       return;
7971     }
7972 
7973     if (isa<UndefValue>(&V)) {
7974       unionAssumedWithUndef();
7975       indicateOptimisticFixpoint();
7976       return;
7977     }
7978   }
7979 
7980   /// See AbstractAttribute::updateImpl(...).
7981   ChangeStatus updateImpl(Attributor &A) override {
    Value &V = getAssociatedValue();
    auto AssumedBefore = getAssumed();
    auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
    const auto &S = AA.getAssumed();
    unionAssumed(S);
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(potential_values)
  }
};

/// ------------------------ NoUndef Attribute ---------------------------------
struct AANoUndefImpl : AANoUndef {
  AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getIRPosition().hasAttr({Attribute::NoUndef})) {
      indicateOptimisticFixpoint();
      return;
    }
    Value &V = getAssociatedValue();
    if (isa<UndefValue>(V))
      indicatePessimisticFixpoint();
    else if (isa<FreezeInst>(V))
      indicateOptimisticFixpoint();
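    // Note: for a returned position the associated value is the function
    // itself, so the generic isGuaranteedNotToBeUndefOrPoison query below
    // would not reflect the actual return value; hence the exclusion of
    // IRP_RETURNED here.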
    else if (getPositionKind() != IRPosition::IRP_RETURNED &&
             isGuaranteedNotToBeUndefOrPoison(&V))
      indicateOptimisticFixpoint();
    else
      AANoUndef::initialize(A);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANoUndef::StateType &State) {
    const Value *UseV = U->get();
    const DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (Function *F = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
    }
    State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
    bool TrackUse = false;
    // Track the use for instructions that must produce undef or poison bits
    // if at least one of their operands contains such bits.
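    // Illustrative IR (a sketch): if %p may contain poison, then
    //   %q = getelementptr inbounds i8, i8* %p, i64 4
    // propagates it, so a noundef requirement on %q also constrains %p and
    // we keep following the use.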
    if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
      TrackUse = true;
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noundef" : "may-undef-or-poison";
  }

  ChangeStatus manifest(Attributor &A) override {
    // We don't manifest the noundef attribute for dead positions because the
    // values associated with dead positions would be replaced with undef
    // values.
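    // E.g., a dead call site return may later have its uses replaced by
    // undef; annotating it noundef first would turn that replacement into
    // immediate undefined behavior.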
    if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
      return ChangeStatus::UNCHANGED;
    // A position whose simplification does not yield any value is considered
    // dead as well; we don't manifest noundef in such positions for the same
    // reason as above.
    auto &ValueSimplifyAA =
        A.getAAFor<AAValueSimplify>(*this, getIRPosition(), DepClassTy::NONE);
    if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
      return ChangeStatus::UNCHANGED;
    return AANoUndef::manifest(A);
  }
};

struct AANoUndefFloating : public AANoUndefImpl {
  AANoUndefFloating(const IRPosition &IRP, Attributor &A)
      : AANoUndefImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUndefImpl::initialize(A);
    if (!getState().isAtFixpoint())
      if (Instruction *CtxI = getCtxI())
        followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANoUndef::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        T.indicatePessimisticFixpoint();
      } else {
        const AANoUndef::StateType &S =
            static_cast<const AANoUndef::StateType &>(AA.getState());
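        // operator^= "clamps" T against S: only information assumed in both
        // states survives in T.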
        T ^= S;
      }
      return T.isValidState();
    };

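    // Accumulate the state over all values underlying this position; if the
    // generic traversal fails we have to be conservative and give up.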
    StateType T;
    if (!genericValueTraversal<AANoUndef, StateType>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noundef)
  }
};

struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};

struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};

struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};
} // namespace

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;

// Macro magic to create the static createForPosition generator function for
// attributes that follow the naming scheme.
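//
// For illustration, a hand-written sketch (not the exact preprocessor
// output) of what CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// expands to:
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for invalid positions!");
//     // ... the other invalid position kinds ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }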

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for " POS_NAME " positions!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

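// Instantiate the generators defined above for each abstract attribute,
// grouped by the set of IR positions the attribute supports.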
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV