1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 
36 #include <cassert>
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "attributor"
41 
42 static cl::opt<bool> ManifestInternal(
43     "attributor-manifest-internal", cl::Hidden,
44     cl::desc("Manifest Attributor internal string attributes."),
45     cl::init(false));
46 
47 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
48                                        cl::Hidden);
49 
50 template <>
51 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
52 
53 static cl::opt<unsigned, true> MaxPotentialValues(
54     "attributor-max-potential-values", cl::Hidden,
55     cl::desc("Maximum number of potential values to be "
56              "tracked for each position."),
57     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
58     cl::init(7));
59 
60 STATISTIC(NumAAs, "Number of abstract attributes created");
61 
62 // Some helper macros to deal with statistics tracking.
63 //
64 // Usage:
65 // For simple IR attribute tracking overload trackStatistics in the abstract
66 // attribute and choose the right STATS_DECLTRACK_********* macro,
67 // e.g.,:
68 //  void trackStatistics() const override {
69 //    STATS_DECLTRACK_ARG_ATTR(returned)
70 //  }
71 // If there is a single "increment" side one can use the macro
72 // STATS_DECLTRACK with a custom message. If there are multiple increment
73 // sides, STATS_DECL and STATS_TRACK can also be used separately.
74 //
75 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
76   ("Number of " #TYPE " marked '" #NAME "'")
77 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
78 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
79 #define STATS_DECL(NAME, TYPE, MSG)                                            \
80   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
81 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
82 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
83   {                                                                            \
84     STATS_DECL(NAME, TYPE, MSG)                                                \
85     STATS_TRACK(NAME, TYPE)                                                    \
86   }
87 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
88   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
89 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
90   STATS_DECLTRACK(NAME, CSArguments,                                           \
91                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
92 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
94 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
95   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
96 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
97   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
98                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
99 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
100   STATS_DECLTRACK(NAME, CSReturn,                                              \
101                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
102 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
103   STATS_DECLTRACK(NAME, Floating,                                              \
104                   ("Number of floating values known to be '" #NAME "'"))
105 
106 // Specialization of the operator<< for abstract attributes subclasses. This
107 // disambiguates situations where multiple operators are applicable.
108 namespace llvm {
109 #define PIPE_OPERATOR(CLASS)                                                   \
110   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
111     return OS << static_cast<const AbstractAttribute &>(AA);                   \
112   }
113 
114 PIPE_OPERATOR(AAIsDead)
115 PIPE_OPERATOR(AANoUnwind)
116 PIPE_OPERATOR(AANoSync)
117 PIPE_OPERATOR(AANoRecurse)
118 PIPE_OPERATOR(AAWillReturn)
119 PIPE_OPERATOR(AANoReturn)
120 PIPE_OPERATOR(AAReturnedValues)
121 PIPE_OPERATOR(AANonNull)
122 PIPE_OPERATOR(AANoAlias)
123 PIPE_OPERATOR(AADereferenceable)
124 PIPE_OPERATOR(AAAlign)
125 PIPE_OPERATOR(AANoCapture)
126 PIPE_OPERATOR(AAValueSimplify)
127 PIPE_OPERATOR(AANoFree)
128 PIPE_OPERATOR(AAHeapToStack)
129 PIPE_OPERATOR(AAReachability)
130 PIPE_OPERATOR(AAMemoryBehavior)
131 PIPE_OPERATOR(AAMemoryLocation)
132 PIPE_OPERATOR(AAValueConstantRange)
133 PIPE_OPERATOR(AAPrivatizablePtr)
134 PIPE_OPERATOR(AAUndefinedBehavior)
135 PIPE_OPERATOR(AAPotentialValues)
136 PIPE_OPERATOR(AANoUndef)
137 
138 #undef PIPE_OPERATOR
139 } // namespace llvm
140 
141 namespace {
142 
143 /// Get pointer operand of memory accessing instruction. If \p I is
144 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
145 /// is set to false and the instruction is volatile, return nullptr.
146 static const Value *getPointerOperand(const Instruction *I,
147                                       bool AllowVolatile) {
148   if (!AllowVolatile && I->isVolatile())
149     return nullptr;
150 
151   if (auto *LI = dyn_cast<LoadInst>(I)) {
152     return LI->getPointerOperand();
153   }
154 
155   if (auto *SI = dyn_cast<StoreInst>(I)) {
156     return SI->getPointerOperand();
157   }
158 
159   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
160     return CXI->getPointerOperand();
161   }
162 
163   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
164     return RMWI->getPointerOperand();
165   }
166 
167   return nullptr;
168 }
169 
170 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
171 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
172 /// getelement pointer instructions that traverse the natural type of \p Ptr if
173 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
174 /// through a cast to i8*.
175 ///
176 /// TODO: This could probably live somewhere more prominantly if it doesn't
177 ///       already exist.
178 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
179                                int64_t Offset, IRBuilder<NoFolder> &IRB,
180                                const DataLayout &DL) {
181   assert(Offset >= 0 && "Negative offset not supported yet!");
182   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
183                     << "-bytes as " << *ResTy << "\n");
184 
185   if (Offset) {
186     SmallVector<Value *, 4> Indices;
187     std::string GEPName = Ptr->getName().str() + ".0";
188 
189     // Add 0 index to look through the pointer.
190     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
191            "Offset out of bounds");
192     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
193 
194     Type *Ty = PtrElemTy;
195     do {
196       auto *STy = dyn_cast<StructType>(Ty);
197       if (!STy)
198         // Non-aggregate type, we cast and make byte-wise progress now.
199         break;
200 
201       const StructLayout *SL = DL.getStructLayout(STy);
202       if (int64_t(SL->getSizeInBytes()) < Offset)
203         break;
204 
205       uint64_t Idx = SL->getElementContainingOffset(Offset);
206       assert(Idx < STy->getNumElements() && "Offset calculation error!");
207       uint64_t Rem = Offset - SL->getElementOffset(Idx);
208       Ty = STy->getElementType(Idx);
209 
210       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
211                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
212 
213       GEPName += "." + std::to_string(Idx);
214       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
215       Offset = Rem;
216     } while (Offset);
217 
218     // Create a GEP for the indices collected above.
219     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
220 
221     // If an offset is left we use byte-wise adjustment.
222     if (Offset) {
223       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
224       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
225                           GEPName + ".b" + Twine(Offset));
226     }
227   }
228 
229   // Ensure the result has the requested type.
230   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
231 
232   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
233   return Ptr;
234 }
235 
236 /// Recursively visit all values that might become \p IRP at some point. This
237 /// will be done by looking through cast instructions, selects, phis, and calls
238 /// with the "returned" attribute. Once we cannot look through the value any
239 /// further, the callback \p VisitValueCB is invoked and passed the current
240 /// value, the \p State, and a flag to indicate if we stripped anything.
241 /// Stripped means that we unpacked the value associated with \p IRP at least
242 /// once. Note that the value used for the callback may still be the value
243 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
244 /// we will never visit more values than specified by \p MaxValues.
245 template <typename AAType, typename StateTy>
246 static bool genericValueTraversal(
247     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
248     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
249         VisitValueCB,
250     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
251     function_ref<Value *(Value *)> StripCB = nullptr) {
252 
253   const AAIsDead *LivenessAA = nullptr;
254   if (IRP.getAnchorScope())
255     LivenessAA = &A.getAAFor<AAIsDead>(
256         QueryingAA,
257         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
258         DepClassTy::NONE);
259   bool AnyDead = false;
260 
261   using Item = std::pair<Value *, const Instruction *>;
262   SmallSet<Item, 16> Visited;
263   SmallVector<Item, 16> Worklist;
264   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
265 
266   int Iteration = 0;
267   do {
268     Item I = Worklist.pop_back_val();
269     Value *V = I.first;
270     CtxI = I.second;
271     if (StripCB)
272       V = StripCB(V);
273 
274     // Check if we should process the current value. To prevent endless
275     // recursion keep a record of the values we followed!
276     if (!Visited.insert(I).second)
277       continue;
278 
279     // Make sure we limit the compile time for complex expressions.
280     if (Iteration++ >= MaxValues)
281       return false;
282 
283     // Explicitly look through calls with a "returned" attribute if we do
284     // not have a pointer as stripPointerCasts only works on them.
285     Value *NewV = nullptr;
286     if (V->getType()->isPointerTy()) {
287       NewV = V->stripPointerCasts();
288     } else {
289       auto *CB = dyn_cast<CallBase>(V);
290       if (CB && CB->getCalledFunction()) {
291         for (Argument &Arg : CB->getCalledFunction()->args())
292           if (Arg.hasReturnedAttr()) {
293             NewV = CB->getArgOperand(Arg.getArgNo());
294             break;
295           }
296       }
297     }
298     if (NewV && NewV != V) {
299       Worklist.push_back({NewV, CtxI});
300       continue;
301     }
302 
303     // Look through select instructions, visit both potential values.
304     if (auto *SI = dyn_cast<SelectInst>(V)) {
305       Worklist.push_back({SI->getTrueValue(), CtxI});
306       Worklist.push_back({SI->getFalseValue(), CtxI});
307       continue;
308     }
309 
310     // Look through phi nodes, visit all live operands.
311     if (auto *PHI = dyn_cast<PHINode>(V)) {
312       assert(LivenessAA &&
313              "Expected liveness in the presence of instructions!");
314       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
315         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
316         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
317                             LivenessAA,
318                             /* CheckBBLivenessOnly */ true)) {
319           AnyDead = true;
320           continue;
321         }
322         Worklist.push_back(
323             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
324       }
325       continue;
326     }
327 
328     if (UseValueSimplify && !isa<Constant>(V)) {
329       bool UsedAssumedInformation = false;
330       Optional<Constant *> C =
331           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
332       if (!C.hasValue())
333         continue;
334       if (Value *NewV = C.getValue()) {
335         Worklist.push_back({NewV, CtxI});
336         continue;
337       }
338     }
339 
340     // Once a leaf is reached we inform the user through the callback.
341     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
342       return false;
343   } while (!Worklist.empty());
344 
345   // If we actually used liveness information so we have to record a dependence.
346   if (AnyDead)
347     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
348 
349   // All values have been visited.
350   return true;
351 }
352 
353 const Value *stripAndAccumulateMinimalOffsets(
354     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
355     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
356     bool UseAssumed = false) {
357 
358   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
359     const IRPosition &Pos = IRPosition::value(V);
360     // Only track dependence if we are going to use the assumed info.
361     const AAValueConstantRange &ValueConstantRangeAA =
362         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
363                                          UseAssumed ? DepClassTy::OPTIONAL
364                                                     : DepClassTy::NONE);
365     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
366                                      : ValueConstantRangeAA.getKnown();
367     // We can only use the lower part of the range because the upper part can
368     // be higher than what the value can really be.
369     ROffset = Range.getSignedMin();
370     return true;
371   };
372 
373   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
374                                                 AttributorAnalysis);
375 }
376 
377 static const Value *getMinimalBaseOfAccsesPointerOperand(
378     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
379     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
380   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
381   if (!Ptr)
382     return nullptr;
383   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
384   const Value *Base = stripAndAccumulateMinimalOffsets(
385       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
386 
387   BytesOffset = OffsetAPInt.getSExtValue();
388   return Base;
389 }
390 
391 static const Value *
392 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
393                                      const DataLayout &DL,
394                                      bool AllowNonInbounds = false) {
395   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
396   if (!Ptr)
397     return nullptr;
398 
399   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
400                                           AllowNonInbounds);
401 }
402 
403 /// Helper function to clamp a state \p S of type \p StateType with the
404 /// information in \p R and indicate/return if \p S did change (as-in update is
405 /// required to be run again).
406 template <typename StateType>
407 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
408   auto Assumed = S.getAssumed();
409   S ^= R;
410   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
411                                    : ChangeStatus::CHANGED;
412 }
413 
414 /// Clamp the information known for all returned values of a function
415 /// (identified by \p QueryingAA) into \p S.
416 template <typename AAType, typename StateType = typename AAType::StateType>
417 static void clampReturnedValueStates(
418     Attributor &A, const AAType &QueryingAA, StateType &S,
419     const IRPosition::CallBaseContext *CBContext = nullptr) {
420   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
421                     << QueryingAA << " into " << S << "\n");
422 
423   assert((QueryingAA.getIRPosition().getPositionKind() ==
424               IRPosition::IRP_RETURNED ||
425           QueryingAA.getIRPosition().getPositionKind() ==
426               IRPosition::IRP_CALL_SITE_RETURNED) &&
427          "Can only clamp returned value states for a function returned or call "
428          "site returned position!");
429 
430   // Use an optional state as there might not be any return values and we want
431   // to join (IntegerState::operator&) the state of all there are.
432   Optional<StateType> T;
433 
434   // Callback for each possibly returned value.
435   auto CheckReturnValue = [&](Value &RV) -> bool {
436     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
437     const AAType &AA =
438         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
439     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
440                       << " @ " << RVPos << "\n");
441     const StateType &AAS = AA.getState();
442     if (T.hasValue())
443       *T &= AAS;
444     else
445       T = AAS;
446     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
447                       << "\n");
448     return T->isValidState();
449   };
450 
451   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
452     S.indicatePessimisticFixpoint();
453   else if (T.hasValue())
454     S ^= *T;
455 }
456 
457 /// Helper class for generic deduction: return value -> returned position.
458 template <typename AAType, typename BaseType,
459           typename StateType = typename BaseType::StateType,
460           bool PropagateCallBaseContext = false>
461 struct AAReturnedFromReturnedValues : public BaseType {
462   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
463       : BaseType(IRP, A) {}
464 
465   /// See AbstractAttribute::updateImpl(...).
466   ChangeStatus updateImpl(Attributor &A) override {
467     StateType S(StateType::getBestState(this->getState()));
468     clampReturnedValueStates<AAType, StateType>(
469         A, *this, S,
470         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
471     // TODO: If we know we visited all returned values, thus no are assumed
472     // dead, we can take the known information from the state T.
473     return clampStateAndIndicateChange<StateType>(this->getState(), S);
474   }
475 };
476 
477 /// Clamp the information known at all call sites for a given argument
478 /// (identified by \p QueryingAA) into \p S.
479 template <typename AAType, typename StateType = typename AAType::StateType>
480 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
481                                         StateType &S) {
482   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
483                     << QueryingAA << " into " << S << "\n");
484 
485   assert(QueryingAA.getIRPosition().getPositionKind() ==
486              IRPosition::IRP_ARGUMENT &&
487          "Can only clamp call site argument states for an argument position!");
488 
489   // Use an optional state as there might not be any return values and we want
490   // to join (IntegerState::operator&) the state of all there are.
491   Optional<StateType> T;
492 
493   // The argument number which is also the call site argument number.
494   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
495 
496   auto CallSiteCheck = [&](AbstractCallSite ACS) {
497     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
498     // Check if a coresponding argument was found or if it is on not associated
499     // (which can happen for callback calls).
500     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
501       return false;
502 
503     const AAType &AA =
504         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
505     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
506                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
507     const StateType &AAS = AA.getState();
508     if (T.hasValue())
509       *T &= AAS;
510     else
511       T = AAS;
512     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
513                       << "\n");
514     return T->isValidState();
515   };
516 
517   bool AllCallSitesKnown;
518   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
519                               AllCallSitesKnown))
520     S.indicatePessimisticFixpoint();
521   else if (T.hasValue())
522     S ^= *T;
523 }
524 
525 /// This function is the bridge between argument position and the call base
526 /// context.
527 template <typename AAType, typename BaseType,
528           typename StateType = typename AAType::StateType>
529 bool getArgumentStateFromCallBaseContext(Attributor &A,
530                                          BaseType &QueryingAttribute,
531                                          IRPosition &Pos, StateType &State) {
532   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
533          "Expected an 'argument' position !");
534   const CallBase *CBContext = Pos.getCallBaseContext();
535   if (!CBContext)
536     return false;
537 
538   int ArgNo = Pos.getCallSiteArgNo();
539   assert(ArgNo >= 0 && "Invalid Arg No!");
540 
541   const auto &AA = A.getAAFor<AAType>(
542       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
543       DepClassTy::REQUIRED);
544   const StateType &CBArgumentState =
545       static_cast<const StateType &>(AA.getState());
546 
547   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
548                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
549                     << "\n");
550 
551   // NOTE: If we want to do call site grouping it should happen here.
552   State ^= CBArgumentState;
553   return true;
554 }
555 
556 /// Helper class for generic deduction: call site argument -> argument position.
557 template <typename AAType, typename BaseType,
558           typename StateType = typename AAType::StateType,
559           bool BridgeCallBaseContext = false>
560 struct AAArgumentFromCallSiteArguments : public BaseType {
561   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
562       : BaseType(IRP, A) {}
563 
564   /// See AbstractAttribute::updateImpl(...).
565   ChangeStatus updateImpl(Attributor &A) override {
566     StateType S = StateType::getBestState(this->getState());
567 
568     if (BridgeCallBaseContext) {
569       bool Success =
570           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
571               A, *this, this->getIRPosition(), S);
572       if (Success)
573         return clampStateAndIndicateChange<StateType>(this->getState(), S);
574     }
575     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
576 
577     // TODO: If we know we visited all incoming values, thus no are assumed
578     // dead, we can take the known information from the state T.
579     return clampStateAndIndicateChange<StateType>(this->getState(), S);
580   }
581 };
582 
583 /// Helper class for generic replication: function returned -> cs returned.
584 template <typename AAType, typename BaseType,
585           typename StateType = typename BaseType::StateType,
586           bool IntroduceCallBaseContext = false>
587 struct AACallSiteReturnedFromReturned : public BaseType {
588   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
589       : BaseType(IRP, A) {}
590 
591   /// See AbstractAttribute::updateImpl(...).
592   ChangeStatus updateImpl(Attributor &A) override {
593     assert(this->getIRPosition().getPositionKind() ==
594                IRPosition::IRP_CALL_SITE_RETURNED &&
595            "Can only wrap function returned positions for call site returned "
596            "positions!");
597     auto &S = this->getState();
598 
599     const Function *AssociatedFunction =
600         this->getIRPosition().getAssociatedFunction();
601     if (!AssociatedFunction)
602       return S.indicatePessimisticFixpoint();
603 
604     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
605     if (IntroduceCallBaseContext)
606       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
607                         << CBContext << "\n");
608 
609     IRPosition FnPos = IRPosition::returned(
610         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
611     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
612     return clampStateAndIndicateChange(S, AA.getState());
613   }
614 };
615 
616 /// Helper function to accumulate uses.
617 template <class AAType, typename StateType = typename AAType::StateType>
618 static void followUsesInContext(AAType &AA, Attributor &A,
619                                 MustBeExecutedContextExplorer &Explorer,
620                                 const Instruction *CtxI,
621                                 SetVector<const Use *> &Uses,
622                                 StateType &State) {
623   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
624   for (unsigned u = 0; u < Uses.size(); ++u) {
625     const Use *U = Uses[u];
626     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
627       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
628       if (Found && AA.followUseInMBEC(A, U, UserI, State))
629         for (const Use &Us : UserI->uses())
630           Uses.insert(&Us);
631     }
632   }
633 }
634 
635 /// Use the must-be-executed-context around \p I to add information into \p S.
636 /// The AAType class is required to have `followUseInMBEC` method with the
637 /// following signature and behaviour:
638 ///
639 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
640 /// U - Underlying use.
641 /// I - The user of the \p U.
642 /// Returns true if the value should be tracked transitively.
643 ///
644 template <class AAType, typename StateType = typename AAType::StateType>
645 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
646                              Instruction &CtxI) {
647 
648   // Container for (transitive) uses of the associated value.
649   SetVector<const Use *> Uses;
650   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
651     Uses.insert(&U);
652 
653   MustBeExecutedContextExplorer &Explorer =
654       A.getInfoCache().getMustBeExecutedContextExplorer();
655 
656   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
657 
658   if (S.isAtFixpoint())
659     return;
660 
661   SmallVector<const BranchInst *, 4> BrInsts;
662   auto Pred = [&](const Instruction *I) {
663     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
664       if (Br->isConditional())
665         BrInsts.push_back(Br);
666     return true;
667   };
668 
669   // Here, accumulate conditional branch instructions in the context. We
670   // explore the child paths and collect the known states. The disjunction of
671   // those states can be merged to its own state. Let ParentState_i be a state
672   // to indicate the known information for an i-th branch instruction in the
673   // context. ChildStates are created for its successors respectively.
674   //
675   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
676   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
677   //      ...
678   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
679   //
680   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
681   //
682   // FIXME: Currently, recursive branches are not handled. For example, we
683   // can't deduce that ptr must be dereferenced in below function.
684   //
685   // void f(int a, int c, int *ptr) {
686   //    if(a)
687   //      if (b) {
688   //        *ptr = 0;
689   //      } else {
690   //        *ptr = 1;
691   //      }
692   //    else {
693   //      if (b) {
694   //        *ptr = 0;
695   //      } else {
696   //        *ptr = 1;
697   //      }
698   //    }
699   // }
700 
701   Explorer.checkForAllContext(&CtxI, Pred);
702   for (const BranchInst *Br : BrInsts) {
703     StateType ParentState;
704 
705     // The known state of the parent state is a conjunction of children's
706     // known states so it is initialized with a best state.
707     ParentState.indicateOptimisticFixpoint();
708 
709     for (const BasicBlock *BB : Br->successors()) {
710       StateType ChildState;
711 
712       size_t BeforeSize = Uses.size();
713       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
714 
715       // Erase uses which only appear in the child.
716       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
717         It = Uses.erase(It);
718 
719       ParentState &= ChildState;
720     }
721 
722     // Use only known state.
723     S += ParentState;
724   }
725 }
726 
727 /// -----------------------NoUnwind Function Attribute--------------------------
728 
729 struct AANoUnwindImpl : AANoUnwind {
730   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
731 
732   const std::string getAsStr() const override {
733     return getAssumed() ? "nounwind" : "may-unwind";
734   }
735 
736   /// See AbstractAttribute::updateImpl(...).
737   ChangeStatus updateImpl(Attributor &A) override {
738     auto Opcodes = {
739         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
740         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
741         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
742 
743     auto CheckForNoUnwind = [&](Instruction &I) {
744       if (!I.mayThrow())
745         return true;
746 
747       if (const auto *CB = dyn_cast<CallBase>(&I)) {
748         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
749             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
750         return NoUnwindAA.isAssumedNoUnwind();
751       }
752       return false;
753     };
754 
755     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
756       return indicatePessimisticFixpoint();
757 
758     return ChangeStatus::UNCHANGED;
759   }
760 };
761 
762 struct AANoUnwindFunction final : public AANoUnwindImpl {
763   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
764       : AANoUnwindImpl(IRP, A) {}
765 
766   /// See AbstractAttribute::trackStatistics()
767   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
768 };
769 
770 /// NoUnwind attribute deduction for a call sites.
771 struct AANoUnwindCallSite final : AANoUnwindImpl {
772   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
773       : AANoUnwindImpl(IRP, A) {}
774 
775   /// See AbstractAttribute::initialize(...).
776   void initialize(Attributor &A) override {
777     AANoUnwindImpl::initialize(A);
778     Function *F = getAssociatedFunction();
779     if (!F || F->isDeclaration())
780       indicatePessimisticFixpoint();
781   }
782 
783   /// See AbstractAttribute::updateImpl(...).
784   ChangeStatus updateImpl(Attributor &A) override {
785     // TODO: Once we have call site specific value information we can provide
786     //       call site specific liveness information and then it makes
787     //       sense to specialize attributes for call sites arguments instead of
788     //       redirecting requests to the callee argument.
789     Function *F = getAssociatedFunction();
790     const IRPosition &FnPos = IRPosition::function(*F);
791     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
792     return clampStateAndIndicateChange(getState(), FnAA.getState());
793   }
794 
795   /// See AbstractAttribute::trackStatistics()
796   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
797 };
798 
799 /// --------------------- Function Return Values -------------------------------
800 
801 /// "Attribute" that collects all potential returned values and the return
802 /// instructions that they arise from.
803 ///
804 /// If there is a unique returned value R, the manifest method will:
805 ///   - mark R with the "returned" attribute, if R is an argument.
806 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
807 
808   /// Mapping of values potentially returned by the associated function to the
809   /// return instructions that might return them.
810   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
811 
812   /// Mapping to remember the number of returned values for a call site such
813   /// that we can avoid updates if nothing changed.
814   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
815 
816   /// Set of unresolved calls returned by the associated function.
817   SmallSetVector<CallBase *, 4> UnresolvedCalls;
818 
819   /// State flags
820   ///
821   ///{
822   bool IsFixed = false;
823   bool IsValidState = true;
824   ///}
825 
826 public:
827   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
828       : AAReturnedValues(IRP, A) {}
829 
830   /// See AbstractAttribute::initialize(...).
831   void initialize(Attributor &A) override {
832     // Reset the state.
833     IsFixed = false;
834     IsValidState = true;
835     ReturnedValues.clear();
836 
837     Function *F = getAssociatedFunction();
838     if (!F || F->isDeclaration()) {
839       indicatePessimisticFixpoint();
840       return;
841     }
842     assert(!F->getReturnType()->isVoidTy() &&
843            "Did not expect a void return type!");
844 
845     // The map from instruction opcodes to those instructions in the function.
846     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
847 
848     // Look through all arguments, if one is marked as returned we are done.
849     for (Argument &Arg : F->args()) {
850       if (Arg.hasReturnedAttr()) {
851         auto &ReturnInstSet = ReturnedValues[&Arg];
852         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
853           for (Instruction *RI : *Insts)
854             ReturnInstSet.insert(cast<ReturnInst>(RI));
855 
856         indicateOptimisticFixpoint();
857         return;
858       }
859     }
860 
861     if (!A.isFunctionIPOAmendable(*F))
862       indicatePessimisticFixpoint();
863   }
864 
865   /// See AbstractAttribute::manifest(...).
866   ChangeStatus manifest(Attributor &A) override;
867 
868   /// See AbstractAttribute::getState(...).
869   AbstractState &getState() override { return *this; }
870 
871   /// See AbstractAttribute::getState(...).
872   const AbstractState &getState() const override { return *this; }
873 
874   /// See AbstractAttribute::updateImpl(Attributor &A).
875   ChangeStatus updateImpl(Attributor &A) override;
876 
877   llvm::iterator_range<iterator> returned_values() override {
878     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
879   }
880 
881   llvm::iterator_range<const_iterator> returned_values() const override {
882     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
883   }
884 
885   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
886     return UnresolvedCalls;
887   }
888 
889   /// Return the number of potential return values, -1 if unknown.
890   size_t getNumReturnValues() const override {
891     return isValidState() ? ReturnedValues.size() : -1;
892   }
893 
894   /// Return an assumed unique return value if a single candidate is found. If
895   /// there cannot be one, return a nullptr. If it is not clear yet, return the
896   /// Optional::NoneType.
897   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
898 
899   /// See AbstractState::checkForAllReturnedValues(...).
900   bool checkForAllReturnedValuesAndReturnInsts(
901       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
902       const override;
903 
904   /// Pretty print the attribute similar to the IR representation.
905   const std::string getAsStr() const override;
906 
907   /// See AbstractState::isAtFixpoint().
908   bool isAtFixpoint() const override { return IsFixed; }
909 
910   /// See AbstractState::isValidState().
911   bool isValidState() const override { return IsValidState; }
912 
913   /// See AbstractState::indicateOptimisticFixpoint(...).
914   ChangeStatus indicateOptimisticFixpoint() override {
915     IsFixed = true;
916     return ChangeStatus::UNCHANGED;
917   }
918 
919   ChangeStatus indicatePessimisticFixpoint() override {
920     IsFixed = true;
921     IsValidState = false;
922     return ChangeStatus::CHANGED;
923   }
924 };
925 
926 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
927   ChangeStatus Changed = ChangeStatus::UNCHANGED;
928 
929   // Bookkeeping.
930   assert(isValidState());
931   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
932                   "Number of function with known return values");
933 
934   // Check if we have an assumed unique return value that we could manifest.
935   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
936 
937   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
938     return Changed;
939 
940   // Bookkeeping.
941   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
942                   "Number of function with unique return");
943 
944   // Callback to replace the uses of CB with the constant C.
945   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
946     if (CB.use_empty())
947       return ChangeStatus::UNCHANGED;
948     if (A.changeValueAfterManifest(CB, C))
949       return ChangeStatus::CHANGED;
950     return ChangeStatus::UNCHANGED;
951   };
952 
953   // If the assumed unique return value is an argument, annotate it.
954   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
955     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
956             getAssociatedFunction()->getReturnType())) {
957       getIRPosition() = IRPosition::argument(*UniqueRVArg);
958       Changed = IRAttribute::manifest(A);
959     }
960   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
961     // We can replace the returned value with the unique returned constant.
962     Value &AnchorValue = getAnchorValue();
963     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
964       for (const Use &U : F->uses())
965         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
966           if (CB->isCallee(&U)) {
967             Constant *RVCCast =
968                 CB->getType() == RVC->getType()
969                     ? RVC
970                     : ConstantExpr::getPointerCast(RVC, CB->getType());
971             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
972           }
973     } else {
974       assert(isa<CallBase>(AnchorValue) &&
975              "Expcected a function or call base anchor!");
976       Constant *RVCCast =
977           AnchorValue.getType() == RVC->getType()
978               ? RVC
979               : ConstantExpr::getPointerCast(RVC, AnchorValue.getType());
980       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
981     }
982     if (Changed == ChangeStatus::CHANGED)
983       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
984                       "Number of function returns replaced by constant return");
985   }
986 
987   return Changed;
988 }
989 
990 const std::string AAReturnedValuesImpl::getAsStr() const {
991   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
992          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
993          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
994 }
995 
996 Optional<Value *>
997 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
998   // If checkForAllReturnedValues provides a unique value, ignoring potential
999   // undef values that can also be present, it is assumed to be the actual
1000   // return value and forwarded to the caller of this method. If there are
1001   // multiple, a nullptr is returned indicating there cannot be a unique
1002   // returned value.
1003   Optional<Value *> UniqueRV;
1004 
1005   auto Pred = [&](Value &RV) -> bool {
1006     // If we found a second returned value and neither the current nor the saved
1007     // one is an undef, there is no unique returned value. Undefs are special
1008     // since we can pretend they have any value.
1009     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1010         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1011       UniqueRV = nullptr;
1012       return false;
1013     }
1014 
1015     // Do not overwrite a value with an undef.
1016     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1017       UniqueRV = &RV;
1018 
1019     return true;
1020   };
1021 
1022   if (!A.checkForAllReturnedValues(Pred, *this))
1023     UniqueRV = nullptr;
1024 
1025   return UniqueRV;
1026 }
1027 
1028 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1029     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1030     const {
1031   if (!isValidState())
1032     return false;
1033 
1034   // Check all returned values but ignore call sites as long as we have not
1035   // encountered an overdefined one during an update.
1036   for (auto &It : ReturnedValues) {
1037     Value *RV = It.first;
1038 
1039     CallBase *CB = dyn_cast<CallBase>(RV);
1040     if (CB && !UnresolvedCalls.count(CB))
1041       continue;
1042 
1043     if (!Pred(*RV, It.second))
1044       return false;
1045   }
1046 
1047   return true;
1048 }
1049 
1050 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1051   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1052   bool Changed = false;
1053 
1054   // State used in the value traversals starting in returned values.
1055   struct RVState {
1056     // The map in which we collect return values -> return instrs.
1057     decltype(ReturnedValues) &RetValsMap;
1058     // The flag to indicate a change.
1059     bool &Changed;
1060     // The return instrs we come from.
1061     SmallSetVector<ReturnInst *, 4> RetInsts;
1062   };
1063 
1064   // Callback for a leaf value returned by the associated function.
1065   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1066                          bool) -> bool {
1067     auto Size = RVS.RetValsMap[&Val].size();
1068     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1069     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1070     RVS.Changed |= Inserted;
1071     LLVM_DEBUG({
1072       if (Inserted)
1073         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1074                << " => " << RVS.RetInsts.size() << "\n";
1075     });
1076     return true;
1077   };
1078 
1079   // Helper method to invoke the generic value traversal.
1080   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1081                                 const Instruction *CtxI) {
1082     IRPosition RetValPos = IRPosition::value(RV, getCallBaseContext());
1083     return genericValueTraversal<AAReturnedValues, RVState>(
1084         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1085         /* UseValueSimplify */ false);
1086   };
1087 
1088   // Callback for all "return intructions" live in the associated function.
1089   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1090     ReturnInst &Ret = cast<ReturnInst>(I);
1091     RVState RVS({ReturnedValues, Changed, {}});
1092     RVS.RetInsts.insert(&Ret);
1093     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1094   };
1095 
1096   // Start by discovering returned values from all live returned instructions in
1097   // the associated function.
1098   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1099     return indicatePessimisticFixpoint();
1100 
1101   // Once returned values "directly" present in the code are handled we try to
1102   // resolve returned calls. To avoid modifications to the ReturnedValues map
1103   // while we iterate over it we kept record of potential new entries in a copy
1104   // map, NewRVsMap.
1105   decltype(ReturnedValues) NewRVsMap;
1106 
1107   auto HandleReturnValue = [&](Value *RV,
1108                                SmallSetVector<ReturnInst *, 4> &RIs) {
1109     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1110                       << RIs.size() << " RIs\n");
1111     CallBase *CB = dyn_cast<CallBase>(RV);
1112     if (!CB || UnresolvedCalls.count(CB))
1113       return;
1114 
1115     if (!CB->getCalledFunction()) {
1116       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1117                         << "\n");
1118       UnresolvedCalls.insert(CB);
1119       return;
1120     }
1121 
1122     // TODO: use the function scope once we have call site AAReturnedValues.
1123     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1124         *this, IRPosition::function(*CB->getCalledFunction()),
1125         DepClassTy::REQUIRED);
1126     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1127                       << RetValAA << "\n");
1128 
1129     // Skip dead ends, thus if we do not know anything about the returned
1130     // call we mark it as unresolved and it will stay that way.
1131     if (!RetValAA.getState().isValidState()) {
1132       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1133                         << "\n");
1134       UnresolvedCalls.insert(CB);
1135       return;
1136     }
1137 
1138     // Do not try to learn partial information. If the callee has unresolved
1139     // return values we will treat the call as unresolved/opaque.
1140     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1141     if (!RetValAAUnresolvedCalls.empty()) {
1142       UnresolvedCalls.insert(CB);
1143       return;
1144     }
1145 
1146     // Now check if we can track transitively returned values. If possible, thus
1147     // if all return value can be represented in the current scope, do so.
1148     bool Unresolved = false;
1149     for (auto &RetValAAIt : RetValAA.returned_values()) {
1150       Value *RetVal = RetValAAIt.first;
1151       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1152           isa<Constant>(RetVal))
1153         continue;
1154       // Anything that did not fit in the above categories cannot be resolved,
1155       // mark the call as unresolved.
1156       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1157                            "cannot be translated: "
1158                         << *RetVal << "\n");
1159       UnresolvedCalls.insert(CB);
1160       Unresolved = true;
1161       break;
1162     }
1163 
1164     if (Unresolved)
1165       return;
1166 
1167     // Now track transitively returned values.
1168     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1169     if (NumRetAA == RetValAA.getNumReturnValues()) {
1170       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1171                            "changed since it was seen last\n");
1172       return;
1173     }
1174     NumRetAA = RetValAA.getNumReturnValues();
1175 
1176     for (auto &RetValAAIt : RetValAA.returned_values()) {
1177       Value *RetVal = RetValAAIt.first;
1178       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1179         // Arguments are mapped to call site operands and we begin the traversal
1180         // again.
1181         bool Unused = false;
1182         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1183         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1184         continue;
1185       }
1186       if (isa<CallBase>(RetVal)) {
1187         // Call sites are resolved by the callee attribute over time, no need to
1188         // do anything for us.
1189         continue;
1190       }
1191       if (isa<Constant>(RetVal)) {
1192         // Constants are valid everywhere, we can simply take them.
1193         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1194         continue;
1195       }
1196     }
1197   };
1198 
1199   for (auto &It : ReturnedValues)
1200     HandleReturnValue(It.first, It.second);
1201 
1202   // Because processing the new information can again lead to new return values
1203   // we have to be careful and iterate until this iteration is complete. The
1204   // idea is that we are in a stable state at the end of an update. All return
1205   // values have been handled and properly categorized. We might not update
1206   // again if we have not requested a non-fix attribute so we cannot "wait" for
1207   // the next update to analyze a new return value.
1208   while (!NewRVsMap.empty()) {
1209     auto It = std::move(NewRVsMap.back());
1210     NewRVsMap.pop_back();
1211 
1212     assert(!It.second.empty() && "Entry does not add anything.");
1213     auto &ReturnInsts = ReturnedValues[It.first];
1214     for (ReturnInst *RI : It.second)
1215       if (ReturnInsts.insert(RI)) {
1216         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1217                           << *It.first << " => " << *RI << "\n");
1218         HandleReturnValue(It.first, ReturnInsts);
1219         Changed = true;
1220       }
1221   }
1222 
1223   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1224   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1225 }
1226 
1227 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1228   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1229       : AAReturnedValuesImpl(IRP, A) {}
1230 
1231   /// See AbstractAttribute::trackStatistics()
1232   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1233 };
1234 
1235 /// Returned values information for a call sites.
1236 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1237   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1238       : AAReturnedValuesImpl(IRP, A) {}
1239 
1240   /// See AbstractAttribute::initialize(...).
1241   void initialize(Attributor &A) override {
1242     // TODO: Once we have call site specific value information we can provide
1243     //       call site specific liveness information and then it makes
1244     //       sense to specialize attributes for call sites instead of
1245     //       redirecting requests to the callee.
1246     llvm_unreachable("Abstract attributes for returned values are not "
1247                      "supported for call sites yet!");
1248   }
1249 
1250   /// See AbstractAttribute::updateImpl(...).
1251   ChangeStatus updateImpl(Attributor &A) override {
1252     return indicatePessimisticFixpoint();
1253   }
1254 
1255   /// See AbstractAttribute::trackStatistics()
1256   void trackStatistics() const override {}
1257 };
1258 
1259 /// ------------------------ NoSync Function Attribute -------------------------
1260 
1261 struct AANoSyncImpl : AANoSync {
1262   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1263 
1264   const std::string getAsStr() const override {
1265     return getAssumed() ? "nosync" : "may-sync";
1266   }
1267 
1268   /// See AbstractAttribute::updateImpl(...).
1269   ChangeStatus updateImpl(Attributor &A) override;
1270 
1271   /// Helper function used to determine whether an instruction is non-relaxed
1272   /// atomic. In other words, if an atomic instruction does not have unordered
1273   /// or monotonic ordering
1274   static bool isNonRelaxedAtomic(Instruction *I);
1275 
1276   /// Helper function specific for intrinsics which are potentially volatile
1277   static bool isNoSyncIntrinsic(Instruction *I);
1278 };
1279 
1280 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1281   if (!I->isAtomic())
1282     return false;
1283 
1284   if (auto *FI = dyn_cast<FenceInst>(I))
1285     // All legal orderings for fence are stronger than monotonic.
1286     return FI->getSyncScopeID() != SyncScope::SingleThread;
1287   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1288     // Unordered is not a legal ordering for cmpxchg.
1289     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1290             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1291   }
1292 
1293   AtomicOrdering Ordering;
1294   switch (I->getOpcode()) {
1295   case Instruction::AtomicRMW:
1296     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1297     break;
1298   case Instruction::Store:
1299     Ordering = cast<StoreInst>(I)->getOrdering();
1300     break;
1301   case Instruction::Load:
1302     Ordering = cast<LoadInst>(I)->getOrdering();
1303     break;
1304   default:
1305     llvm_unreachable(
1306         "New atomic operations need to be known in the attributor.");
1307   }
1308 
1309   return (Ordering != AtomicOrdering::Unordered &&
1310           Ordering != AtomicOrdering::Monotonic);
1311 }
1312 
1313 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1314 /// which would be nosync except that they have a volatile flag.  All other
1315 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1316 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1317   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1318     return !MI->isVolatile();
1319   return false;
1320 }
1321 
1322 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1323 
1324   auto CheckRWInstForNoSync = [&](Instruction &I) {
1325     /// We are looking for volatile instructions or Non-Relaxed atomics.
1326 
1327     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1328       if (CB->hasFnAttr(Attribute::NoSync))
1329         return true;
1330 
1331       if (isNoSyncIntrinsic(&I))
1332         return true;
1333 
1334       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1335           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1336       return NoSyncAA.isAssumedNoSync();
1337     }
1338 
1339     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1340       return true;
1341 
1342     return false;
1343   };
1344 
1345   auto CheckForNoSync = [&](Instruction &I) {
1346     // At this point we handled all read/write effects and they are all
1347     // nosync, so they can be skipped.
1348     if (I.mayReadOrWriteMemory())
1349       return true;
1350 
1351     // non-convergent and readnone imply nosync.
1352     return !cast<CallBase>(I).isConvergent();
1353   };
1354 
1355   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1356       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1357     return indicatePessimisticFixpoint();
1358 
1359   return ChangeStatus::UNCHANGED;
1360 }
1361 
1362 struct AANoSyncFunction final : public AANoSyncImpl {
1363   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1364       : AANoSyncImpl(IRP, A) {}
1365 
1366   /// See AbstractAttribute::trackStatistics()
1367   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1368 };
1369 
1370 /// NoSync attribute deduction for a call sites.
1371 struct AANoSyncCallSite final : AANoSyncImpl {
1372   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1373       : AANoSyncImpl(IRP, A) {}
1374 
1375   /// See AbstractAttribute::initialize(...).
1376   void initialize(Attributor &A) override {
1377     AANoSyncImpl::initialize(A);
1378     Function *F = getAssociatedFunction();
1379     if (!F || F->isDeclaration())
1380       indicatePessimisticFixpoint();
1381   }
1382 
1383   /// See AbstractAttribute::updateImpl(...).
1384   ChangeStatus updateImpl(Attributor &A) override {
1385     // TODO: Once we have call site specific value information we can provide
1386     //       call site specific liveness information and then it makes
1387     //       sense to specialize attributes for call sites arguments instead of
1388     //       redirecting requests to the callee argument.
1389     Function *F = getAssociatedFunction();
1390     const IRPosition &FnPos = IRPosition::function(*F);
1391     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1392     return clampStateAndIndicateChange(getState(), FnAA.getState());
1393   }
1394 
1395   /// See AbstractAttribute::trackStatistics()
1396   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1397 };
1398 
1399 /// ------------------------ No-Free Attributes ----------------------------
1400 
1401 struct AANoFreeImpl : public AANoFree {
1402   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1403 
1404   /// See AbstractAttribute::updateImpl(...).
1405   ChangeStatus updateImpl(Attributor &A) override {
1406     auto CheckForNoFree = [&](Instruction &I) {
1407       const auto &CB = cast<CallBase>(I);
1408       if (CB.hasFnAttr(Attribute::NoFree))
1409         return true;
1410 
1411       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1412           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1413       return NoFreeAA.isAssumedNoFree();
1414     };
1415 
1416     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1417       return indicatePessimisticFixpoint();
1418     return ChangeStatus::UNCHANGED;
1419   }
1420 
1421   /// See AbstractAttribute::getAsStr().
1422   const std::string getAsStr() const override {
1423     return getAssumed() ? "nofree" : "may-free";
1424   }
1425 };
1426 
1427 struct AANoFreeFunction final : public AANoFreeImpl {
1428   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1429       : AANoFreeImpl(IRP, A) {}
1430 
1431   /// See AbstractAttribute::trackStatistics()
1432   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1433 };
1434 
1435 /// NoFree attribute deduction for a call sites.
1436 struct AANoFreeCallSite final : AANoFreeImpl {
1437   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1438       : AANoFreeImpl(IRP, A) {}
1439 
1440   /// See AbstractAttribute::initialize(...).
1441   void initialize(Attributor &A) override {
1442     AANoFreeImpl::initialize(A);
1443     Function *F = getAssociatedFunction();
1444     if (!F || F->isDeclaration())
1445       indicatePessimisticFixpoint();
1446   }
1447 
1448   /// See AbstractAttribute::updateImpl(...).
1449   ChangeStatus updateImpl(Attributor &A) override {
1450     // TODO: Once we have call site specific value information we can provide
1451     //       call site specific liveness information and then it makes
1452     //       sense to specialize attributes for call sites arguments instead of
1453     //       redirecting requests to the callee argument.
1454     Function *F = getAssociatedFunction();
1455     const IRPosition &FnPos = IRPosition::function(*F);
1456     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1457     return clampStateAndIndicateChange(getState(), FnAA.getState());
1458   }
1459 
1460   /// See AbstractAttribute::trackStatistics()
1461   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1462 };
1463 
1464 /// NoFree attribute for floating values.
1465 struct AANoFreeFloating : AANoFreeImpl {
1466   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1467       : AANoFreeImpl(IRP, A) {}
1468 
1469   /// See AbstractAttribute::trackStatistics()
1470   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1471 
1472   /// See Abstract Attribute::updateImpl(...).
1473   ChangeStatus updateImpl(Attributor &A) override {
1474     const IRPosition &IRP = getIRPosition();
1475 
1476     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1477         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1478     if (NoFreeAA.isAssumedNoFree())
1479       return ChangeStatus::UNCHANGED;
1480 
1481     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1482     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1483       Instruction *UserI = cast<Instruction>(U.getUser());
1484       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1485         if (CB->isBundleOperand(&U))
1486           return false;
1487         if (!CB->isArgOperand(&U))
1488           return true;
1489         unsigned ArgNo = CB->getArgOperandNo(&U);
1490 
1491         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1492             *this, IRPosition::callsite_argument(*CB, ArgNo),
1493             DepClassTy::REQUIRED);
1494         return NoFreeArg.isAssumedNoFree();
1495       }
1496 
1497       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1498           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1499         Follow = true;
1500         return true;
1501       }
1502       if (isa<ReturnInst>(UserI))
1503         return true;
1504 
1505       // Unknown user.
1506       return false;
1507     };
1508     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1509       return indicatePessimisticFixpoint();
1510 
1511     return ChangeStatus::UNCHANGED;
1512   }
1513 };
1514 
1515 /// NoFree attribute for a call site argument.
1516 struct AANoFreeArgument final : AANoFreeFloating {
1517   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1518       : AANoFreeFloating(IRP, A) {}
1519 
1520   /// See AbstractAttribute::trackStatistics()
1521   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1522 };
1523 
1524 /// NoFree attribute for call site arguments.
1525 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1526   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1527       : AANoFreeFloating(IRP, A) {}
1528 
1529   /// See AbstractAttribute::updateImpl(...).
1530   ChangeStatus updateImpl(Attributor &A) override {
1531     // TODO: Once we have call site specific value information we can provide
1532     //       call site specific liveness information and then it makes
1533     //       sense to specialize attributes for call sites arguments instead of
1534     //       redirecting requests to the callee argument.
1535     Argument *Arg = getAssociatedArgument();
1536     if (!Arg)
1537       return indicatePessimisticFixpoint();
1538     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1539     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1540     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1541   }
1542 
1543   /// See AbstractAttribute::trackStatistics()
1544   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1545 };
1546 
1547 /// NoFree attribute for function return value.
1548 struct AANoFreeReturned final : AANoFreeFloating {
1549   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1550       : AANoFreeFloating(IRP, A) {
1551     llvm_unreachable("NoFree is not applicable to function returns!");
1552   }
1553 
1554   /// See AbstractAttribute::initialize(...).
1555   void initialize(Attributor &A) override {
1556     llvm_unreachable("NoFree is not applicable to function returns!");
1557   }
1558 
1559   /// See AbstractAttribute::updateImpl(...).
1560   ChangeStatus updateImpl(Attributor &A) override {
1561     llvm_unreachable("NoFree is not applicable to function returns!");
1562   }
1563 
1564   /// See AbstractAttribute::trackStatistics()
1565   void trackStatistics() const override {}
1566 };
1567 
1568 /// NoFree attribute deduction for a call site return value.
1569 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1570   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1571       : AANoFreeFloating(IRP, A) {}
1572 
1573   ChangeStatus manifest(Attributor &A) override {
1574     return ChangeStatus::UNCHANGED;
1575   }
1576   /// See AbstractAttribute::trackStatistics()
1577   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1578 };
1579 
1580 /// ------------------------ NonNull Argument Attribute ------------------------
1581 static int64_t getKnownNonNullAndDerefBytesForUse(
1582     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1583     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1584   TrackUse = false;
1585 
1586   const Value *UseV = U->get();
1587   if (!UseV->getType()->isPointerTy())
1588     return 0;
1589 
1590   // We need to follow common pointer manipulation uses to the accesses they
1591   // feed into. We can try to be smart to avoid looking through things we do not
1592   // like for now, e.g., non-inbounds GEPs.
1593   if (isa<CastInst>(I)) {
1594     TrackUse = true;
1595     return 0;
1596   }
1597 
1598   if (isa<GetElementPtrInst>(I)) {
1599     TrackUse = true;
1600     return 0;
1601   }
1602 
1603   Type *PtrTy = UseV->getType();
1604   const Function *F = I->getFunction();
1605   bool NullPointerIsDefined =
1606       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1607   const DataLayout &DL = A.getInfoCache().getDL();
1608   if (const auto *CB = dyn_cast<CallBase>(I)) {
1609     if (CB->isBundleOperand(U)) {
1610       if (RetainedKnowledge RK = getKnowledgeFromUse(
1611               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1612         IsNonNull |=
1613             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1614         return RK.ArgValue;
1615       }
1616       return 0;
1617     }
1618 
1619     if (CB->isCallee(U)) {
1620       IsNonNull |= !NullPointerIsDefined;
1621       return 0;
1622     }
1623 
1624     unsigned ArgNo = CB->getArgOperandNo(U);
1625     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1626     // As long as we only use known information there is no need to track
1627     // dependences here.
1628     auto &DerefAA =
1629         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1630     IsNonNull |= DerefAA.isKnownNonNull();
1631     return DerefAA.getKnownDereferenceableBytes();
1632   }
1633 
1634   int64_t Offset;
1635   const Value *Base =
1636       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1637   if (Base) {
1638     if (Base == &AssociatedValue &&
1639         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1640       int64_t DerefBytes =
1641           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1642 
1643       IsNonNull |= !NullPointerIsDefined;
1644       return std::max(int64_t(0), DerefBytes);
1645     }
1646   }
1647 
1648   /// Corner case when an offset is 0.
1649   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1650                                               /*AllowNonInbounds*/ true);
1651   if (Base) {
1652     if (Offset == 0 && Base == &AssociatedValue &&
1653         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1654       int64_t DerefBytes =
1655           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1656       IsNonNull |= !NullPointerIsDefined;
1657       return std::max(int64_t(0), DerefBytes);
1658     }
1659   }
1660 
1661   return 0;
1662 }
1663 
1664 struct AANonNullImpl : AANonNull {
1665   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1666       : AANonNull(IRP, A),
1667         NullIsDefined(NullPointerIsDefined(
1668             getAnchorScope(),
1669             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1670 
1671   /// See AbstractAttribute::initialize(...).
1672   void initialize(Attributor &A) override {
1673     Value &V = getAssociatedValue();
1674     if (!NullIsDefined &&
1675         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1676                 /* IgnoreSubsumingPositions */ false, &A)) {
1677       indicateOptimisticFixpoint();
1678       return;
1679     }
1680 
1681     if (isa<ConstantPointerNull>(V)) {
1682       indicatePessimisticFixpoint();
1683       return;
1684     }
1685 
1686     AANonNull::initialize(A);
1687 
1688     bool CanBeNull, CanBeFreed;
1689     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
1690                                          CanBeFreed)) {
1691       if (!CanBeNull) {
1692         indicateOptimisticFixpoint();
1693         return;
1694       }
1695     }
1696 
1697     if (isa<GlobalValue>(&getAssociatedValue())) {
1698       indicatePessimisticFixpoint();
1699       return;
1700     }
1701 
1702     if (Instruction *CtxI = getCtxI())
1703       followUsesInMBEC(*this, A, getState(), *CtxI);
1704   }
1705 
1706   /// See followUsesInMBEC
1707   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1708                        AANonNull::StateType &State) {
1709     bool IsNonNull = false;
1710     bool TrackUse = false;
1711     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1712                                        IsNonNull, TrackUse);
1713     State.setKnown(IsNonNull);
1714     return TrackUse;
1715   }
1716 
1717   /// See AbstractAttribute::getAsStr().
1718   const std::string getAsStr() const override {
1719     return getAssumed() ? "nonnull" : "may-null";
1720   }
1721 
1722   /// Flag to determine if the underlying value can be null and still allow
1723   /// valid accesses.
1724   const bool NullIsDefined;
1725 };
1726 
1727 /// NonNull attribute for a floating value.
1728 struct AANonNullFloating : public AANonNullImpl {
1729   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1730       : AANonNullImpl(IRP, A) {}
1731 
1732   /// See AbstractAttribute::updateImpl(...).
1733   ChangeStatus updateImpl(Attributor &A) override {
1734     const DataLayout &DL = A.getDataLayout();
1735 
1736     DominatorTree *DT = nullptr;
1737     AssumptionCache *AC = nullptr;
1738     InformationCache &InfoCache = A.getInfoCache();
1739     if (const Function *Fn = getAnchorScope()) {
1740       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1741       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1742     }
1743 
1744     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1745                             AANonNull::StateType &T, bool Stripped) -> bool {
1746       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1747                                              DepClassTy::REQUIRED);
1748       if (!Stripped && this == &AA) {
1749         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1750           T.indicatePessimisticFixpoint();
1751       } else {
1752         // Use abstract attribute information.
1753         const AANonNull::StateType &NS = AA.getState();
1754         T ^= NS;
1755       }
1756       return T.isValidState();
1757     };
1758 
1759     StateType T;
1760     if (!genericValueTraversal<AANonNull, StateType>(
1761             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1762       return indicatePessimisticFixpoint();
1763 
1764     return clampStateAndIndicateChange(getState(), T);
1765   }
1766 
1767   /// See AbstractAttribute::trackStatistics()
1768   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1769 };
1770 
1771 /// NonNull attribute for function return value.
1772 struct AANonNullReturned final
1773     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1774   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1775       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1776 
1777   /// See AbstractAttribute::getAsStr().
1778   const std::string getAsStr() const override {
1779     return getAssumed() ? "nonnull" : "may-null";
1780   }
1781 
1782   /// See AbstractAttribute::trackStatistics()
1783   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1784 };
1785 
1786 /// NonNull attribute for function argument.
1787 struct AANonNullArgument final
1788     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1789   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1790       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1791 
1792   /// See AbstractAttribute::trackStatistics()
1793   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1794 };
1795 
1796 struct AANonNullCallSiteArgument final : AANonNullFloating {
1797   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1798       : AANonNullFloating(IRP, A) {}
1799 
1800   /// See AbstractAttribute::trackStatistics()
1801   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1802 };
1803 
1804 /// NonNull attribute for a call site return position.
1805 struct AANonNullCallSiteReturned final
1806     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1807   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1808       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1809 
1810   /// See AbstractAttribute::trackStatistics()
1811   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1812 };
1813 
1814 /// ------------------------ No-Recurse Attributes ----------------------------
1815 
1816 struct AANoRecurseImpl : public AANoRecurse {
1817   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1818 
1819   /// See AbstractAttribute::getAsStr()
1820   const std::string getAsStr() const override {
1821     return getAssumed() ? "norecurse" : "may-recurse";
1822   }
1823 };
1824 
1825 struct AANoRecurseFunction final : AANoRecurseImpl {
1826   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1827       : AANoRecurseImpl(IRP, A) {}
1828 
1829   /// See AbstractAttribute::initialize(...).
1830   void initialize(Attributor &A) override {
1831     AANoRecurseImpl::initialize(A);
1832     if (const Function *F = getAnchorScope())
1833       if (A.getInfoCache().getSccSize(*F) != 1)
1834         indicatePessimisticFixpoint();
1835   }
1836 
1837   /// See AbstractAttribute::updateImpl(...).
1838   ChangeStatus updateImpl(Attributor &A) override {
1839 
1840     // If all live call sites are known to be no-recurse, we are as well.
1841     auto CallSitePred = [&](AbstractCallSite ACS) {
1842       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1843           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1844           DepClassTy::NONE);
1845       return NoRecurseAA.isKnownNoRecurse();
1846     };
1847     bool AllCallSitesKnown;
1848     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1849       // If we know all call sites and all are known no-recurse, we are done.
1850       // If all known call sites, which might not be all that exist, are known
1851       // to be no-recurse, we are not done but we can continue to assume
1852       // no-recurse. If one of the call sites we have not visited will become
1853       // live, another update is triggered.
1854       if (AllCallSitesKnown)
1855         indicateOptimisticFixpoint();
1856       return ChangeStatus::UNCHANGED;
1857     }
1858 
1859     // If the above check does not hold anymore we look at the calls.
1860     auto CheckForNoRecurse = [&](Instruction &I) {
1861       const auto &CB = cast<CallBase>(I);
1862       if (CB.hasFnAttr(Attribute::NoRecurse))
1863         return true;
1864 
1865       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1866           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1867       if (!NoRecurseAA.isAssumedNoRecurse())
1868         return false;
1869 
1870       // Recursion to the same function
1871       if (CB.getCalledFunction() == getAnchorScope())
1872         return false;
1873 
1874       return true;
1875     };
1876 
1877     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1878       return indicatePessimisticFixpoint();
1879     return ChangeStatus::UNCHANGED;
1880   }
1881 
1882   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1883 };
1884 
1885 /// NoRecurse attribute deduction for a call sites.
1886 struct AANoRecurseCallSite final : AANoRecurseImpl {
1887   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1888       : AANoRecurseImpl(IRP, A) {}
1889 
1890   /// See AbstractAttribute::initialize(...).
1891   void initialize(Attributor &A) override {
1892     AANoRecurseImpl::initialize(A);
1893     Function *F = getAssociatedFunction();
1894     if (!F || F->isDeclaration())
1895       indicatePessimisticFixpoint();
1896   }
1897 
1898   /// See AbstractAttribute::updateImpl(...).
1899   ChangeStatus updateImpl(Attributor &A) override {
1900     // TODO: Once we have call site specific value information we can provide
1901     //       call site specific liveness information and then it makes
1902     //       sense to specialize attributes for call sites arguments instead of
1903     //       redirecting requests to the callee argument.
1904     Function *F = getAssociatedFunction();
1905     const IRPosition &FnPos = IRPosition::function(*F);
1906     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1907     return clampStateAndIndicateChange(getState(), FnAA.getState());
1908   }
1909 
1910   /// See AbstractAttribute::trackStatistics()
1911   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1912 };
1913 
1914 /// -------------------- Undefined-Behavior Attributes ------------------------
1915 
1916 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1917   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1918       : AAUndefinedBehavior(IRP, A) {}
1919 
1920   /// See AbstractAttribute::updateImpl(...).
1921   // through a pointer (i.e. also branches etc.)
1922   ChangeStatus updateImpl(Attributor &A) override {
1923     const size_t UBPrevSize = KnownUBInsts.size();
1924     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1925 
1926     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1927       // Skip instructions that are already saved.
1928       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1929         return true;
1930 
1931       // If we reach here, we know we have an instruction
1932       // that accesses memory through a pointer operand,
1933       // for which getPointerOperand() should give it to us.
1934       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1935       assert(PtrOp &&
1936              "Expected pointer operand of memory accessing instruction");
1937 
1938       // Either we stopped and the appropriate action was taken,
1939       // or we got back a simplified value to continue.
1940       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1941       if (!SimplifiedPtrOp.hasValue())
1942         return true;
1943       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1944 
1945       // A memory access through a pointer is considered UB
1946       // only if the pointer has constant null value.
1947       // TODO: Expand it to not only check constant values.
1948       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1949         AssumedNoUBInsts.insert(&I);
1950         return true;
1951       }
1952       const Type *PtrTy = PtrOpVal->getType();
1953 
1954       // Because we only consider instructions inside functions,
1955       // assume that a parent function exists.
1956       const Function *F = I.getFunction();
1957 
1958       // A memory access using constant null pointer is only considered UB
1959       // if null pointer is _not_ defined for the target platform.
1960       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1961         AssumedNoUBInsts.insert(&I);
1962       else
1963         KnownUBInsts.insert(&I);
1964       return true;
1965     };
1966 
1967     auto InspectBrInstForUB = [&](Instruction &I) {
1968       // A conditional branch instruction is considered UB if it has `undef`
1969       // condition.
1970 
1971       // Skip instructions that are already saved.
1972       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1973         return true;
1974 
1975       // We know we have a branch instruction.
1976       auto BrInst = cast<BranchInst>(&I);
1977 
1978       // Unconditional branches are never considered UB.
1979       if (BrInst->isUnconditional())
1980         return true;
1981 
1982       // Either we stopped and the appropriate action was taken,
1983       // or we got back a simplified value to continue.
1984       Optional<Value *> SimplifiedCond =
1985           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1986       if (!SimplifiedCond.hasValue())
1987         return true;
1988       AssumedNoUBInsts.insert(&I);
1989       return true;
1990     };
1991 
1992     auto InspectCallSiteForUB = [&](Instruction &I) {
1993       // Check whether a callsite always cause UB or not
1994 
1995       // Skip instructions that are already saved.
1996       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1997         return true;
1998 
1999       // Check nonnull and noundef argument attribute violation for each
2000       // callsite.
2001       CallBase &CB = cast<CallBase>(I);
2002       Function *Callee = CB.getCalledFunction();
2003       if (!Callee)
2004         return true;
2005       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2006         // If current argument is known to be simplified to null pointer and the
2007         // corresponding argument position is known to have nonnull attribute,
2008         // the argument is poison. Furthermore, if the argument is poison and
2009         // the position is known to have noundef attriubte, this callsite is
2010         // considered UB.
2011         if (idx >= Callee->arg_size())
2012           break;
2013         Value *ArgVal = CB.getArgOperand(idx);
2014         if (!ArgVal)
2015           continue;
2016         // Here, we handle three cases.
2017         //   (1) Not having a value means it is dead. (we can replace the value
2018         //       with undef)
2019         //   (2) Simplified to undef. The argument violate noundef attriubte.
2020         //   (3) Simplified to null pointer where known to be nonnull.
2021         //       The argument is a poison value and violate noundef attribute.
2022         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2023         auto &NoUndefAA =
2024             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2025         if (!NoUndefAA.isKnownNoUndef())
2026           continue;
2027         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2028             *this, IRPosition::value(*ArgVal), DepClassTy::NONE);
2029         if (!ValueSimplifyAA.isKnown())
2030           continue;
2031         Optional<Value *> SimplifiedVal =
2032             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2033         if (!SimplifiedVal.hasValue() ||
2034             isa<UndefValue>(*SimplifiedVal.getValue())) {
2035           KnownUBInsts.insert(&I);
2036           continue;
2037         }
2038         if (!ArgVal->getType()->isPointerTy() ||
2039             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2040           continue;
2041         auto &NonNullAA =
2042             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2043         if (NonNullAA.isKnownNonNull())
2044           KnownUBInsts.insert(&I);
2045       }
2046       return true;
2047     };
2048 
2049     auto InspectReturnInstForUB =
2050         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2051           // Check if a return instruction always cause UB or not
2052           // Note: It is guaranteed that the returned position of the anchor
2053           //       scope has noundef attribute when this is called.
2054           //       We also ensure the return position is not "assumed dead"
2055           //       because the returned value was then potentially simplified to
2056           //       `undef` in AAReturnedValues without removing the `noundef`
2057           //       attribute yet.
2058 
2059           // When the returned position has noundef attriubte, UB occur in the
2060           // following cases.
2061           //   (1) Returned value is known to be undef.
2062           //   (2) The value is known to be a null pointer and the returned
2063           //       position has nonnull attribute (because the returned value is
2064           //       poison).
2065           bool FoundUB = false;
2066           if (isa<UndefValue>(V)) {
2067             FoundUB = true;
2068           } else {
2069             if (isa<ConstantPointerNull>(V)) {
2070               auto &NonNullAA = A.getAAFor<AANonNull>(
2071                   *this, IRPosition::returned(*getAnchorScope()),
2072                   DepClassTy::NONE);
2073               if (NonNullAA.isKnownNonNull())
2074                 FoundUB = true;
2075             }
2076           }
2077 
2078           if (FoundUB)
2079             for (ReturnInst *RI : RetInsts)
2080               KnownUBInsts.insert(RI);
2081           return true;
2082         };
2083 
2084     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2085                               {Instruction::Load, Instruction::Store,
2086                                Instruction::AtomicCmpXchg,
2087                                Instruction::AtomicRMW},
2088                               /* CheckBBLivenessOnly */ true);
2089     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2090                               /* CheckBBLivenessOnly */ true);
2091     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2092 
2093     // If the returned position of the anchor scope has noundef attriubte, check
2094     // all returned instructions.
2095     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2096       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2097       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2098         auto &RetPosNoUndefAA =
2099             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2100         if (RetPosNoUndefAA.isKnownNoUndef())
2101           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2102                                                     *this);
2103       }
2104     }
2105 
2106     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2107         UBPrevSize != KnownUBInsts.size())
2108       return ChangeStatus::CHANGED;
2109     return ChangeStatus::UNCHANGED;
2110   }
2111 
2112   bool isKnownToCauseUB(Instruction *I) const override {
2113     return KnownUBInsts.count(I);
2114   }
2115 
2116   bool isAssumedToCauseUB(Instruction *I) const override {
2117     // In simple words, if an instruction is not in the assumed to _not_
2118     // cause UB, then it is assumed UB (that includes those
2119     // in the KnownUBInsts set). The rest is boilerplate
2120     // is to ensure that it is one of the instructions we test
2121     // for UB.
2122 
2123     switch (I->getOpcode()) {
2124     case Instruction::Load:
2125     case Instruction::Store:
2126     case Instruction::AtomicCmpXchg:
2127     case Instruction::AtomicRMW:
2128       return !AssumedNoUBInsts.count(I);
2129     case Instruction::Br: {
2130       auto BrInst = cast<BranchInst>(I);
2131       if (BrInst->isUnconditional())
2132         return false;
2133       return !AssumedNoUBInsts.count(I);
2134     } break;
2135     default:
2136       return false;
2137     }
2138     return false;
2139   }
2140 
2141   ChangeStatus manifest(Attributor &A) override {
2142     if (KnownUBInsts.empty())
2143       return ChangeStatus::UNCHANGED;
2144     for (Instruction *I : KnownUBInsts)
2145       A.changeToUnreachableAfterManifest(I);
2146     return ChangeStatus::CHANGED;
2147   }
2148 
2149   /// See AbstractAttribute::getAsStr()
2150   const std::string getAsStr() const override {
2151     return getAssumed() ? "undefined-behavior" : "no-ub";
2152   }
2153 
2154   /// Note: The correctness of this analysis depends on the fact that the
2155   /// following 2 sets will stop changing after some point.
2156   /// "Change" here means that their size changes.
2157   /// The size of each set is monotonically increasing
2158   /// (we only add items to them) and it is upper bounded by the number of
2159   /// instructions in the processed function (we can never save more
2160   /// elements in either set than this number). Hence, at some point,
2161   /// they will stop increasing.
2162   /// Consequently, at some point, both sets will have stopped
2163   /// changing, effectively making the analysis reach a fixpoint.
2164 
2165   /// Note: These 2 sets are disjoint and an instruction can be considered
2166   /// one of 3 things:
2167   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2168   ///    the KnownUBInsts set.
2169   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2170   ///    has a reason to assume it).
2171   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2172   ///    could not find a reason to assume or prove that it can cause UB,
2173   ///    hence it assumes it doesn't. We have a set for these instructions
2174   ///    so that we don't reprocess them in every update.
2175   ///    Note however that instructions in this set may cause UB.
2176 
2177 protected:
2178   /// A set of all live instructions _known_ to cause UB.
2179   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2180 
2181 private:
2182   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2183   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2184 
2185   // Should be called on updates in which if we're processing an instruction
2186   // \p I that depends on a value \p V, one of the following has to happen:
2187   // - If the value is assumed, then stop.
2188   // - If the value is known but undef, then consider it UB.
2189   // - Otherwise, do specific processing with the simplified value.
2190   // We return None in the first 2 cases to signify that an appropriate
2191   // action was taken and the caller should stop.
2192   // Otherwise, we return the simplified value that the caller should
2193   // use for specific processing.
2194   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2195                                          Instruction *I) {
2196     const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2197         *this, IRPosition::value(*V), DepClassTy::REQUIRED);
2198     Optional<Value *> SimplifiedV =
2199         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2200     if (!ValueSimplifyAA.isKnown()) {
2201       // Don't depend on assumed values.
2202       return llvm::None;
2203     }
2204     if (!SimplifiedV.hasValue()) {
2205       // If it is known (which we tested above) but it doesn't have a value,
2206       // then we can assume `undef` and hence the instruction is UB.
2207       KnownUBInsts.insert(I);
2208       return llvm::None;
2209     }
2210     Value *Val = SimplifiedV.getValue();
2211     if (isa<UndefValue>(Val)) {
2212       KnownUBInsts.insert(I);
2213       return llvm::None;
2214     }
2215     return Val;
2216   }
2217 };
2218 
2219 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2220   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2221       : AAUndefinedBehaviorImpl(IRP, A) {}
2222 
2223   /// See AbstractAttribute::trackStatistics()
2224   void trackStatistics() const override {
2225     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2226                "Number of instructions known to have UB");
2227     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2228         KnownUBInsts.size();
2229   }
2230 };
2231 
2232 /// ------------------------ Will-Return Attributes ----------------------------
2233 
2234 // Helper function that checks whether a function has any cycle which we don't
2235 // know if it is bounded or not.
2236 // Loops with maximum trip count are considered bounded, any other cycle not.
2237 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2238   ScalarEvolution *SE =
2239       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2240   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2241   // If either SCEV or LoopInfo is not available for the function then we assume
2242   // any cycle to be unbounded cycle.
2243   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2244   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2245   if (!SE || !LI) {
2246     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2247       if (SCCI.hasCycle())
2248         return true;
2249     return false;
2250   }
2251 
2252   // If there's irreducible control, the function may contain non-loop cycles.
2253   if (mayContainIrreducibleControl(F, LI))
2254     return true;
2255 
2256   // Any loop that does not have a max trip count is considered unbounded cycle.
2257   for (auto *L : LI->getLoopsInPreorder()) {
2258     if (!SE->getSmallConstantMaxTripCount(L))
2259       return true;
2260   }
2261   return false;
2262 }
2263 
2264 struct AAWillReturnImpl : public AAWillReturn {
2265   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2266       : AAWillReturn(IRP, A) {}
2267 
2268   /// See AbstractAttribute::initialize(...).
2269   void initialize(Attributor &A) override {
2270     AAWillReturn::initialize(A);
2271 
2272     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2273       indicateOptimisticFixpoint();
2274       return;
2275     }
2276   }
2277 
2278   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2279   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2280     // Check for `mustprogress` in the scope and the associated function which
2281     // might be different if this is a call site.
2282     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2283         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2284       return false;
2285 
2286     const auto &MemAA =
2287         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2288     if (!MemAA.isAssumedReadOnly())
2289       return false;
2290     if (KnownOnly && !MemAA.isKnownReadOnly())
2291       return false;
2292     if (!MemAA.isKnownReadOnly())
2293       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2294 
2295     return true;
2296   }
2297 
2298   /// See AbstractAttribute::updateImpl(...).
2299   ChangeStatus updateImpl(Attributor &A) override {
2300     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2301       return ChangeStatus::UNCHANGED;
2302 
2303     auto CheckForWillReturn = [&](Instruction &I) {
2304       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2305       const auto &WillReturnAA =
2306           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2307       if (WillReturnAA.isKnownWillReturn())
2308         return true;
2309       if (!WillReturnAA.isAssumedWillReturn())
2310         return false;
2311       const auto &NoRecurseAA =
2312           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2313       return NoRecurseAA.isAssumedNoRecurse();
2314     };
2315 
2316     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2317       return indicatePessimisticFixpoint();
2318 
2319     return ChangeStatus::UNCHANGED;
2320   }
2321 
2322   /// See AbstractAttribute::getAsStr()
2323   const std::string getAsStr() const override {
2324     return getAssumed() ? "willreturn" : "may-noreturn";
2325   }
2326 };
2327 
2328 struct AAWillReturnFunction final : AAWillReturnImpl {
2329   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2330       : AAWillReturnImpl(IRP, A) {}
2331 
2332   /// See AbstractAttribute::initialize(...).
2333   void initialize(Attributor &A) override {
2334     AAWillReturnImpl::initialize(A);
2335 
2336     Function *F = getAnchorScope();
2337     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2338       indicatePessimisticFixpoint();
2339   }
2340 
2341   /// See AbstractAttribute::trackStatistics()
2342   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2343 };
2344 
2345 /// WillReturn attribute deduction for a call sites.
2346 struct AAWillReturnCallSite final : AAWillReturnImpl {
2347   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2348       : AAWillReturnImpl(IRP, A) {}
2349 
2350   /// See AbstractAttribute::initialize(...).
2351   void initialize(Attributor &A) override {
2352     AAWillReturnImpl::initialize(A);
2353     Function *F = getAssociatedFunction();
2354     if (!F || !A.isFunctionIPOAmendable(*F))
2355       indicatePessimisticFixpoint();
2356   }
2357 
2358   /// See AbstractAttribute::updateImpl(...).
2359   ChangeStatus updateImpl(Attributor &A) override {
2360     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2361       return ChangeStatus::UNCHANGED;
2362 
2363     // TODO: Once we have call site specific value information we can provide
2364     //       call site specific liveness information and then it makes
2365     //       sense to specialize attributes for call sites arguments instead of
2366     //       redirecting requests to the callee argument.
2367     Function *F = getAssociatedFunction();
2368     const IRPosition &FnPos = IRPosition::function(*F);
2369     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2370     return clampStateAndIndicateChange(getState(), FnAA.getState());
2371   }
2372 
2373   /// See AbstractAttribute::trackStatistics()
2374   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2375 };
2376 
2377 /// -------------------AAReachability Attribute--------------------------
2378 
2379 struct AAReachabilityImpl : AAReachability {
2380   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2381       : AAReachability(IRP, A) {}
2382 
2383   const std::string getAsStr() const override {
2384     // TODO: Return the number of reachable queries.
2385     return "reachable";
2386   }
2387 
2388   /// See AbstractAttribute::initialize(...).
2389   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2390 
2391   /// See AbstractAttribute::updateImpl(...).
2392   ChangeStatus updateImpl(Attributor &A) override {
2393     return indicatePessimisticFixpoint();
2394   }
2395 };
2396 
2397 struct AAReachabilityFunction final : public AAReachabilityImpl {
2398   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2399       : AAReachabilityImpl(IRP, A) {}
2400 
2401   /// See AbstractAttribute::trackStatistics()
2402   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2403 };
2404 
2405 /// ------------------------ NoAlias Argument Attribute ------------------------
2406 
2407 struct AANoAliasImpl : AANoAlias {
2408   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2409     assert(getAssociatedType()->isPointerTy() &&
2410            "Noalias is a pointer attribute");
2411   }
2412 
2413   const std::string getAsStr() const override {
2414     return getAssumed() ? "noalias" : "may-alias";
2415   }
2416 };
2417 
2418 /// NoAlias attribute for a floating value.
2419 struct AANoAliasFloating final : AANoAliasImpl {
2420   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2421       : AANoAliasImpl(IRP, A) {}
2422 
2423   /// See AbstractAttribute::initialize(...).
2424   void initialize(Attributor &A) override {
2425     AANoAliasImpl::initialize(A);
2426     Value *Val = &getAssociatedValue();
2427     do {
2428       CastInst *CI = dyn_cast<CastInst>(Val);
2429       if (!CI)
2430         break;
2431       Value *Base = CI->getOperand(0);
2432       if (!Base->hasOneUse())
2433         break;
2434       Val = Base;
2435     } while (true);
2436 
2437     if (!Val->getType()->isPointerTy()) {
2438       indicatePessimisticFixpoint();
2439       return;
2440     }
2441 
2442     if (isa<AllocaInst>(Val))
2443       indicateOptimisticFixpoint();
2444     else if (isa<ConstantPointerNull>(Val) &&
2445              !NullPointerIsDefined(getAnchorScope(),
2446                                    Val->getType()->getPointerAddressSpace()))
2447       indicateOptimisticFixpoint();
2448     else if (Val != &getAssociatedValue()) {
2449       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2450           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2451       if (ValNoAliasAA.isKnownNoAlias())
2452         indicateOptimisticFixpoint();
2453     }
2454   }
2455 
2456   /// See AbstractAttribute::updateImpl(...).
2457   ChangeStatus updateImpl(Attributor &A) override {
2458     // TODO: Implement this.
2459     return indicatePessimisticFixpoint();
2460   }
2461 
2462   /// See AbstractAttribute::trackStatistics()
2463   void trackStatistics() const override {
2464     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2465   }
2466 };
2467 
2468 /// NoAlias attribute for an argument.
2469 struct AANoAliasArgument final
2470     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2471   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2472   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2473 
2474   /// See AbstractAttribute::initialize(...).
2475   void initialize(Attributor &A) override {
2476     Base::initialize(A);
2477     // See callsite argument attribute and callee argument attribute.
2478     if (hasAttr({Attribute::ByVal}))
2479       indicateOptimisticFixpoint();
2480   }
2481 
2482   /// See AbstractAttribute::update(...).
2483   ChangeStatus updateImpl(Attributor &A) override {
2484     // We have to make sure no-alias on the argument does not break
2485     // synchronization when this is a callback argument, see also [1] below.
2486     // If synchronization cannot be affected, we delegate to the base updateImpl
2487     // function, otherwise we give up for now.
2488 
2489     // If the function is no-sync, no-alias cannot break synchronization.
2490     const auto &NoSyncAA =
2491         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2492                              DepClassTy::OPTIONAL);
2493     if (NoSyncAA.isAssumedNoSync())
2494       return Base::updateImpl(A);
2495 
2496     // If the argument is read-only, no-alias cannot break synchronization.
2497     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2498         *this, getIRPosition(), DepClassTy::OPTIONAL);
2499     if (MemBehaviorAA.isAssumedReadOnly())
2500       return Base::updateImpl(A);
2501 
2502     // If the argument is never passed through callbacks, no-alias cannot break
2503     // synchronization.
2504     bool AllCallSitesKnown;
2505     if (A.checkForAllCallSites(
2506             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2507             true, AllCallSitesKnown))
2508       return Base::updateImpl(A);
2509 
2510     // TODO: add no-alias but make sure it doesn't break synchronization by
2511     // introducing fake uses. See:
2512     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2513     //     International Workshop on OpenMP 2018,
2514     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2515 
2516     return indicatePessimisticFixpoint();
2517   }
2518 
2519   /// See AbstractAttribute::trackStatistics()
2520   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2521 };
2522 
2523 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2524   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2525       : AANoAliasImpl(IRP, A) {}
2526 
2527   /// See AbstractAttribute::initialize(...).
2528   void initialize(Attributor &A) override {
2529     // See callsite argument attribute and callee argument attribute.
2530     const auto &CB = cast<CallBase>(getAnchorValue());
2531     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2532       indicateOptimisticFixpoint();
2533     Value &Val = getAssociatedValue();
2534     if (isa<ConstantPointerNull>(Val) &&
2535         !NullPointerIsDefined(getAnchorScope(),
2536                               Val.getType()->getPointerAddressSpace()))
2537       indicateOptimisticFixpoint();
2538   }
2539 
2540   /// Determine if the underlying value may alias with the call site argument
2541   /// \p OtherArgNo of \p ICS (= the underlying call site).
2542   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2543                             const AAMemoryBehavior &MemBehaviorAA,
2544                             const CallBase &CB, unsigned OtherArgNo) {
2545     // We do not need to worry about aliasing with the underlying IRP.
2546     if (this->getCalleeArgNo() == (int)OtherArgNo)
2547       return false;
2548 
2549     // If it is not a pointer or pointer vector we do not alias.
2550     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2551     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2552       return false;
2553 
2554     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2555         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2556 
2557     // If the argument is readnone, there is no read-write aliasing.
2558     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2559       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2560       return false;
2561     }
2562 
2563     // If the argument is readonly and the underlying value is readonly, there
2564     // is no read-write aliasing.
2565     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2566     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2567       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2568       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2569       return false;
2570     }
2571 
2572     // We have to utilize actual alias analysis queries so we need the object.
2573     if (!AAR)
2574       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2575 
2576     // Try to rule it out at the call site.
2577     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2578     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2579                          "callsite arguments: "
2580                       << getAssociatedValue() << " " << *ArgOp << " => "
2581                       << (IsAliasing ? "" : "no-") << "alias \n");
2582 
2583     return IsAliasing;
2584   }
2585 
2586   bool
2587   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2588                                          const AAMemoryBehavior &MemBehaviorAA,
2589                                          const AANoAlias &NoAliasAA) {
2590     // We can deduce "noalias" if the following conditions hold.
2591     // (i)   Associated value is assumed to be noalias in the definition.
2592     // (ii)  Associated value is assumed to be no-capture in all the uses
2593     //       possibly executed before this callsite.
2594     // (iii) There is no other pointer argument which could alias with the
2595     //       value.
2596 
2597     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2598     if (!AssociatedValueIsNoAliasAtDef) {
2599       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2600                         << " is not no-alias at the definition\n");
2601       return false;
2602     }
2603 
2604     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2605 
2606     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2607     const Function *ScopeFn = VIRP.getAnchorScope();
2608     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
2609     // Check whether the value is captured in the scope using AANoCapture.
2610     //      Look at CFG and check only uses possibly executed before this
2611     //      callsite.
2612     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2613       Instruction *UserI = cast<Instruction>(U.getUser());
2614 
2615       // If UserI is the curr instruction and there is a single potential use of
2616       // the value in UserI we allow the use.
2617       // TODO: We should inspect the operands and allow those that cannot alias
2618       //       with the value.
2619       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2620         return true;
2621 
2622       if (ScopeFn) {
2623         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2624             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2625 
2626         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2627           return true;
2628 
2629         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2630           if (CB->isArgOperand(&U)) {
2631 
2632             unsigned ArgNo = CB->getArgOperandNo(&U);
2633 
2634             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2635                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2636                 DepClassTy::OPTIONAL);
2637 
2638             if (NoCaptureAA.isAssumedNoCapture())
2639               return true;
2640           }
2641         }
2642       }
2643 
2644       // For cases which can potentially have more users
2645       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2646           isa<SelectInst>(U)) {
2647         Follow = true;
2648         return true;
2649       }
2650 
2651       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2652       return false;
2653     };
2654 
2655     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2656       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2657         LLVM_DEBUG(
2658             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2659                    << " cannot be noalias as it is potentially captured\n");
2660         return false;
2661       }
2662     }
2663     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2664 
2665     // Check there is no other pointer argument which could alias with the
2666     // value passed at this call site.
2667     // TODO: AbstractCallSite
2668     const auto &CB = cast<CallBase>(getAnchorValue());
2669     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2670          OtherArgNo++)
2671       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2672         return false;
2673 
2674     return true;
2675   }
2676 
2677   /// See AbstractAttribute::updateImpl(...).
2678   ChangeStatus updateImpl(Attributor &A) override {
2679     // If the argument is readnone we are done as there are no accesses via the
2680     // argument.
2681     auto &MemBehaviorAA =
2682         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2683     if (MemBehaviorAA.isAssumedReadNone()) {
2684       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2685       return ChangeStatus::UNCHANGED;
2686     }
2687 
2688     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2689     const auto &NoAliasAA =
2690         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2691 
2692     AAResults *AAR = nullptr;
2693     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2694                                                NoAliasAA)) {
2695       LLVM_DEBUG(
2696           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2697       return ChangeStatus::UNCHANGED;
2698     }
2699 
2700     return indicatePessimisticFixpoint();
2701   }
2702 
2703   /// See AbstractAttribute::trackStatistics()
2704   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2705 };
2706 
2707 /// NoAlias attribute for function return value.
2708 struct AANoAliasReturned final : AANoAliasImpl {
2709   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2710       : AANoAliasImpl(IRP, A) {}
2711 
2712   /// See AbstractAttribute::initialize(...).
2713   void initialize(Attributor &A) override {
2714     AANoAliasImpl::initialize(A);
2715     Function *F = getAssociatedFunction();
2716     if (!F || F->isDeclaration())
2717       indicatePessimisticFixpoint();
2718   }
2719 
2720   /// See AbstractAttribute::updateImpl(...).
2721   virtual ChangeStatus updateImpl(Attributor &A) override {
2722 
2723     auto CheckReturnValue = [&](Value &RV) -> bool {
2724       if (Constant *C = dyn_cast<Constant>(&RV))
2725         if (C->isNullValue() || isa<UndefValue>(C))
2726           return true;
2727 
2728       /// For now, we can only deduce noalias if we have call sites.
2729       /// FIXME: add more support.
2730       if (!isa<CallBase>(&RV))
2731         return false;
2732 
2733       const IRPosition &RVPos = IRPosition::value(RV);
2734       const auto &NoAliasAA =
2735           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2736       if (!NoAliasAA.isAssumedNoAlias())
2737         return false;
2738 
2739       const auto &NoCaptureAA =
2740           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2741       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2742     };
2743 
2744     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2745       return indicatePessimisticFixpoint();
2746 
2747     return ChangeStatus::UNCHANGED;
2748   }
2749 
2750   /// See AbstractAttribute::trackStatistics()
2751   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2752 };
2753 
2754 /// NoAlias attribute deduction for a call site return value.
2755 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2756   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2757       : AANoAliasImpl(IRP, A) {}
2758 
2759   /// See AbstractAttribute::initialize(...).
2760   void initialize(Attributor &A) override {
2761     AANoAliasImpl::initialize(A);
2762     Function *F = getAssociatedFunction();
2763     if (!F || F->isDeclaration())
2764       indicatePessimisticFixpoint();
2765   }
2766 
2767   /// See AbstractAttribute::updateImpl(...).
2768   ChangeStatus updateImpl(Attributor &A) override {
2769     // TODO: Once we have call site specific value information we can provide
2770     //       call site specific liveness information and then it makes
2771     //       sense to specialize attributes for call sites arguments instead of
2772     //       redirecting requests to the callee argument.
2773     Function *F = getAssociatedFunction();
2774     const IRPosition &FnPos = IRPosition::returned(*F);
2775     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2776     return clampStateAndIndicateChange(getState(), FnAA.getState());
2777   }
2778 
2779   /// See AbstractAttribute::trackStatistics()
2780   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2781 };
2782 
2783 /// -------------------AAIsDead Function Attribute-----------------------
2784 
2785 struct AAIsDeadValueImpl : public AAIsDead {
2786   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2787 
2788   /// See AAIsDead::isAssumedDead().
2789   bool isAssumedDead() const override { return getAssumed(); }
2790 
2791   /// See AAIsDead::isKnownDead().
2792   bool isKnownDead() const override { return getKnown(); }
2793 
2794   /// See AAIsDead::isAssumedDead(BasicBlock *).
2795   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2796 
2797   /// See AAIsDead::isKnownDead(BasicBlock *).
2798   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2799 
2800   /// See AAIsDead::isAssumedDead(Instruction *I).
2801   bool isAssumedDead(const Instruction *I) const override {
2802     return I == getCtxI() && isAssumedDead();
2803   }
2804 
2805   /// See AAIsDead::isKnownDead(Instruction *I).
2806   bool isKnownDead(const Instruction *I) const override {
2807     return isAssumedDead(I) && getKnown();
2808   }
2809 
2810   /// See AbstractAttribute::getAsStr().
2811   const std::string getAsStr() const override {
2812     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2813   }
2814 
2815   /// Check if all uses are assumed dead.
2816   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2817     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2818     // Explicitly set the dependence class to required because we want a long
2819     // chain of N dependent instructions to be considered live as soon as one is
2820     // without going through N update cycles. This is not required for
2821     // correctness.
2822     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2823   }
2824 
2825   /// Determine if \p I is assumed to be side-effect free.
2826   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2827     if (!I || wouldInstructionBeTriviallyDead(I))
2828       return true;
2829 
2830     auto *CB = dyn_cast<CallBase>(I);
2831     if (!CB || isa<IntrinsicInst>(CB))
2832       return false;
2833 
2834     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2835     const auto &NoUnwindAA =
2836         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2837     if (!NoUnwindAA.isAssumedNoUnwind())
2838       return false;
2839     if (!NoUnwindAA.isKnownNoUnwind())
2840       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2841 
2842     const auto &MemBehaviorAA =
2843         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2844     if (MemBehaviorAA.isAssumedReadOnly()) {
2845       if (!MemBehaviorAA.isKnownReadOnly())
2846         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2847       return true;
2848     }
2849     return false;
2850   }
2851 };
2852 
2853 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2854   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2855       : AAIsDeadValueImpl(IRP, A) {}
2856 
2857   /// See AbstractAttribute::initialize(...).
2858   void initialize(Attributor &A) override {
2859     if (isa<UndefValue>(getAssociatedValue())) {
2860       indicatePessimisticFixpoint();
2861       return;
2862     }
2863 
2864     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2865     if (!isAssumedSideEffectFree(A, I))
2866       indicatePessimisticFixpoint();
2867   }
2868 
2869   /// See AbstractAttribute::updateImpl(...).
2870   ChangeStatus updateImpl(Attributor &A) override {
2871     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2872     if (!isAssumedSideEffectFree(A, I))
2873       return indicatePessimisticFixpoint();
2874 
2875     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2876       return indicatePessimisticFixpoint();
2877     return ChangeStatus::UNCHANGED;
2878   }
2879 
2880   /// See AbstractAttribute::manifest(...).
2881   ChangeStatus manifest(Attributor &A) override {
2882     Value &V = getAssociatedValue();
2883     if (auto *I = dyn_cast<Instruction>(&V)) {
2884       // If we get here we basically know the users are all dead. We check if
2885       // isAssumedSideEffectFree returns true here again because it might not be
2886       // the case and only the users are dead but the instruction (=call) is
2887       // still needed.
2888       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2889         A.deleteAfterManifest(*I);
2890         return ChangeStatus::CHANGED;
2891       }
2892     }
2893     if (V.use_empty())
2894       return ChangeStatus::UNCHANGED;
2895 
2896     bool UsedAssumedInformation = false;
2897     Optional<Constant *> C =
2898         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2899     if (C.hasValue() && C.getValue())
2900       return ChangeStatus::UNCHANGED;
2901 
2902     // Replace the value with undef as it is dead but keep droppable uses around
2903     // as they provide information we don't want to give up on just yet.
2904     UndefValue &UV = *UndefValue::get(V.getType());
2905     bool AnyChange =
2906         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2907     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2908   }
2909 
2910   /// See AbstractAttribute::trackStatistics()
2911   void trackStatistics() const override {
2912     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2913   }
2914 };
2915 
2916 struct AAIsDeadArgument : public AAIsDeadFloating {
2917   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2918       : AAIsDeadFloating(IRP, A) {}
2919 
2920   /// See AbstractAttribute::initialize(...).
2921   void initialize(Attributor &A) override {
2922     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2923       indicatePessimisticFixpoint();
2924   }
2925 
2926   /// See AbstractAttribute::manifest(...).
2927   ChangeStatus manifest(Attributor &A) override {
2928     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2929     Argument &Arg = *getAssociatedArgument();
2930     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2931       if (A.registerFunctionSignatureRewrite(
2932               Arg, /* ReplacementTypes */ {},
2933               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2934               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2935         Arg.dropDroppableUses();
2936         return ChangeStatus::CHANGED;
2937       }
2938     return Changed;
2939   }
2940 
2941   /// See AbstractAttribute::trackStatistics()
2942   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2943 };
2944 
2945 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2946   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2947       : AAIsDeadValueImpl(IRP, A) {}
2948 
2949   /// See AbstractAttribute::initialize(...).
2950   void initialize(Attributor &A) override {
2951     if (isa<UndefValue>(getAssociatedValue()))
2952       indicatePessimisticFixpoint();
2953   }
2954 
2955   /// See AbstractAttribute::updateImpl(...).
2956   ChangeStatus updateImpl(Attributor &A) override {
2957     // TODO: Once we have call site specific value information we can provide
2958     //       call site specific liveness information and then it makes
2959     //       sense to specialize attributes for call sites arguments instead of
2960     //       redirecting requests to the callee argument.
2961     Argument *Arg = getAssociatedArgument();
2962     if (!Arg)
2963       return indicatePessimisticFixpoint();
2964     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2965     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2966     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2967   }
2968 
2969   /// See AbstractAttribute::manifest(...).
2970   ChangeStatus manifest(Attributor &A) override {
2971     CallBase &CB = cast<CallBase>(getAnchorValue());
2972     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2973     assert(!isa<UndefValue>(U.get()) &&
2974            "Expected undef values to be filtered out!");
2975     UndefValue &UV = *UndefValue::get(U->getType());
2976     if (A.changeUseAfterManifest(U, UV))
2977       return ChangeStatus::CHANGED;
2978     return ChangeStatus::UNCHANGED;
2979   }
2980 
2981   /// See AbstractAttribute::trackStatistics()
2982   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2983 };
2984 
2985 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2986   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2987       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2988 
2989   /// See AAIsDead::isAssumedDead().
2990   bool isAssumedDead() const override {
2991     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2992   }
2993 
2994   /// See AbstractAttribute::initialize(...).
2995   void initialize(Attributor &A) override {
2996     if (isa<UndefValue>(getAssociatedValue())) {
2997       indicatePessimisticFixpoint();
2998       return;
2999     }
3000 
3001     // We track this separately as a secondary state.
3002     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3003   }
3004 
3005   /// See AbstractAttribute::updateImpl(...).
3006   ChangeStatus updateImpl(Attributor &A) override {
3007     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3008     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3009       IsAssumedSideEffectFree = false;
3010       Changed = ChangeStatus::CHANGED;
3011     }
3012 
3013     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3014       return indicatePessimisticFixpoint();
3015     return Changed;
3016   }
3017 
3018   /// See AbstractAttribute::trackStatistics()
3019   void trackStatistics() const override {
3020     if (IsAssumedSideEffectFree)
3021       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3022     else
3023       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3024   }
3025 
3026   /// See AbstractAttribute::getAsStr().
3027   const std::string getAsStr() const override {
3028     return isAssumedDead()
3029                ? "assumed-dead"
3030                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3031   }
3032 
3033 private:
3034   bool IsAssumedSideEffectFree;
3035 };
3036 
3037 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3038   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3039       : AAIsDeadValueImpl(IRP, A) {}
3040 
3041   /// See AbstractAttribute::updateImpl(...).
3042   ChangeStatus updateImpl(Attributor &A) override {
3043 
3044     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3045                               {Instruction::Ret});
3046 
3047     auto PredForCallSite = [&](AbstractCallSite ACS) {
3048       if (ACS.isCallbackCall() || !ACS.getInstruction())
3049         return false;
3050       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3051     };
3052 
3053     bool AllCallSitesKnown;
3054     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3055                                 AllCallSitesKnown))
3056       return indicatePessimisticFixpoint();
3057 
3058     return ChangeStatus::UNCHANGED;
3059   }
3060 
3061   /// See AbstractAttribute::manifest(...).
3062   ChangeStatus manifest(Attributor &A) override {
3063     // TODO: Rewrite the signature to return void?
3064     bool AnyChange = false;
3065     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3066     auto RetInstPred = [&](Instruction &I) {
3067       ReturnInst &RI = cast<ReturnInst>(I);
3068       if (!isa<UndefValue>(RI.getReturnValue()))
3069         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3070       return true;
3071     };
3072     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3073     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3074   }
3075 
3076   /// See AbstractAttribute::trackStatistics()
3077   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3078 };
3079 
3080 struct AAIsDeadFunction : public AAIsDead {
3081   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3082 
3083   /// See AbstractAttribute::initialize(...).
3084   void initialize(Attributor &A) override {
3085     const Function *F = getAnchorScope();
3086     if (F && !F->isDeclaration()) {
3087       // We only want to compute liveness once. If the function is not part of
3088       // the SCC, skip it.
3089       if (A.isRunOn(*const_cast<Function *>(F))) {
3090         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3091         assumeLive(A, F->getEntryBlock());
3092       } else {
3093         indicatePessimisticFixpoint();
3094       }
3095     }
3096   }
3097 
3098   /// See AbstractAttribute::getAsStr().
3099   const std::string getAsStr() const override {
3100     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3101            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3102            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3103            std::to_string(KnownDeadEnds.size()) + "]";
3104   }
3105 
3106   /// See AbstractAttribute::manifest(...).
3107   ChangeStatus manifest(Attributor &A) override {
3108     assert(getState().isValidState() &&
3109            "Attempted to manifest an invalid state!");
3110 
3111     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3112     Function &F = *getAnchorScope();
3113 
3114     if (AssumedLiveBlocks.empty()) {
3115       A.deleteAfterManifest(F);
3116       return ChangeStatus::CHANGED;
3117     }
3118 
3119     // Flag to determine if we can change an invoke to a call assuming the
3120     // callee is nounwind. This is not possible if the personality of the
3121     // function allows to catch asynchronous exceptions.
3122     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3123 
3124     KnownDeadEnds.set_union(ToBeExploredFrom);
3125     for (const Instruction *DeadEndI : KnownDeadEnds) {
3126       auto *CB = dyn_cast<CallBase>(DeadEndI);
3127       if (!CB)
3128         continue;
3129       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3130           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3131       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3132       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3133         continue;
3134 
3135       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3136         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3137       else
3138         A.changeToUnreachableAfterManifest(
3139             const_cast<Instruction *>(DeadEndI->getNextNode()));
3140       HasChanged = ChangeStatus::CHANGED;
3141     }
3142 
3143     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3144     for (BasicBlock &BB : F)
3145       if (!AssumedLiveBlocks.count(&BB)) {
3146         A.deleteAfterManifest(BB);
3147         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3148       }
3149 
3150     return HasChanged;
3151   }
3152 
3153   /// See AbstractAttribute::updateImpl(...).
3154   ChangeStatus updateImpl(Attributor &A) override;
3155 
3156   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3157     return !AssumedLiveEdges.count(std::make_pair(From, To));
3158   }
3159 
3160   /// See AbstractAttribute::trackStatistics()
3161   void trackStatistics() const override {}
3162 
3163   /// Returns true if the function is assumed dead.
3164   bool isAssumedDead() const override { return false; }
3165 
3166   /// See AAIsDead::isKnownDead().
3167   bool isKnownDead() const override { return false; }
3168 
3169   /// See AAIsDead::isAssumedDead(BasicBlock *).
3170   bool isAssumedDead(const BasicBlock *BB) const override {
3171     assert(BB->getParent() == getAnchorScope() &&
3172            "BB must be in the same anchor scope function.");
3173 
3174     if (!getAssumed())
3175       return false;
3176     return !AssumedLiveBlocks.count(BB);
3177   }
3178 
3179   /// See AAIsDead::isKnownDead(BasicBlock *).
3180   bool isKnownDead(const BasicBlock *BB) const override {
3181     return getKnown() && isAssumedDead(BB);
3182   }
3183 
3184   /// See AAIsDead::isAssumed(Instruction *I).
3185   bool isAssumedDead(const Instruction *I) const override {
3186     assert(I->getParent()->getParent() == getAnchorScope() &&
3187            "Instruction must be in the same anchor scope function.");
3188 
3189     if (!getAssumed())
3190       return false;
3191 
3192     // If it is not in AssumedLiveBlocks then it for sure dead.
3193     // Otherwise, it can still be after noreturn call in a live block.
3194     if (!AssumedLiveBlocks.count(I->getParent()))
3195       return true;
3196 
3197     // If it is not after a liveness barrier it is live.
3198     const Instruction *PrevI = I->getPrevNode();
3199     while (PrevI) {
3200       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3201         return true;
3202       PrevI = PrevI->getPrevNode();
3203     }
3204     return false;
3205   }
3206 
3207   /// See AAIsDead::isKnownDead(Instruction *I).
3208   bool isKnownDead(const Instruction *I) const override {
3209     return getKnown() && isAssumedDead(I);
3210   }
3211 
3212   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3213   /// that internal function called from \p BB should now be looked at.
3214   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3215     if (!AssumedLiveBlocks.insert(&BB).second)
3216       return false;
3217 
3218     // We assume that all of BB is (probably) live now and if there are calls to
3219     // internal functions we will assume that those are now live as well. This
3220     // is a performance optimization for blocks with calls to a lot of internal
3221     // functions. It can however cause dead functions to be treated as live.
3222     for (const Instruction &I : BB)
3223       if (const auto *CB = dyn_cast<CallBase>(&I))
3224         if (const Function *F = CB->getCalledFunction())
3225           if (F->hasLocalLinkage())
3226             A.markLiveInternalFunction(*F);
3227     return true;
3228   }
3229 
3230   /// Collection of instructions that need to be explored again, e.g., we
3231   /// did assume they do not transfer control to (one of their) successors.
3232   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3233 
3234   /// Collection of instructions that are known to not transfer control.
3235   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3236 
3237   /// Collection of all assumed live edges
3238   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3239 
3240   /// Collection of all assumed live BasicBlocks.
3241   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3242 };
3243 
3244 static bool
3245 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3246                         AbstractAttribute &AA,
3247                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3248   const IRPosition &IPos = IRPosition::callsite_function(CB);
3249 
3250   const auto &NoReturnAA =
3251       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3252   if (NoReturnAA.isAssumedNoReturn())
3253     return !NoReturnAA.isKnownNoReturn();
3254   if (CB.isTerminator())
3255     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3256   else
3257     AliveSuccessors.push_back(CB.getNextNode());
3258   return false;
3259 }
3260 
3261 static bool
3262 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3263                         AbstractAttribute &AA,
3264                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3265   bool UsedAssumedInformation =
3266       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3267 
3268   // First, determine if we can change an invoke to a call assuming the
3269   // callee is nounwind. This is not possible if the personality of the
3270   // function allows to catch asynchronous exceptions.
3271   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3272     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3273   } else {
3274     const IRPosition &IPos = IRPosition::callsite_function(II);
3275     const auto &AANoUnw =
3276         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3277     if (AANoUnw.isAssumedNoUnwind()) {
3278       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3279     } else {
3280       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3281     }
3282   }
3283   return UsedAssumedInformation;
3284 }
3285 
3286 static bool
3287 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3288                         AbstractAttribute &AA,
3289                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3290   bool UsedAssumedInformation = false;
3291   if (BI.getNumSuccessors() == 1) {
3292     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3293   } else {
3294     Optional<Constant *> C =
3295         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3296     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3297       // No value yet, assume both edges are dead.
3298     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3299       const BasicBlock *SuccBB =
3300           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3301       AliveSuccessors.push_back(&SuccBB->front());
3302     } else {
3303       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3304       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3305       UsedAssumedInformation = false;
3306     }
3307   }
3308   return UsedAssumedInformation;
3309 }
3310 
3311 static bool
3312 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3313                         AbstractAttribute &AA,
3314                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3315   bool UsedAssumedInformation = false;
3316   Optional<Constant *> C =
3317       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3318   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3319     // No value yet, assume all edges are dead.
3320   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3321     for (auto &CaseIt : SI.cases()) {
3322       if (CaseIt.getCaseValue() == C.getValue()) {
3323         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3324         return UsedAssumedInformation;
3325       }
3326     }
3327     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3328     return UsedAssumedInformation;
3329   } else {
3330     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3331       AliveSuccessors.push_back(&SuccBB->front());
3332   }
3333   return UsedAssumedInformation;
3334 }
3335 
3336 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3337   ChangeStatus Change = ChangeStatus::UNCHANGED;
3338 
3339   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3340                     << getAnchorScope()->size() << "] BBs and "
3341                     << ToBeExploredFrom.size() << " exploration points and "
3342                     << KnownDeadEnds.size() << " known dead ends\n");
3343 
3344   // Copy and clear the list of instructions we need to explore from. It is
3345   // refilled with instructions the next update has to look at.
3346   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3347                                                ToBeExploredFrom.end());
3348   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3349 
3350   SmallVector<const Instruction *, 8> AliveSuccessors;
3351   while (!Worklist.empty()) {
3352     const Instruction *I = Worklist.pop_back_val();
3353     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3354 
3355     // Fast forward for uninteresting instructions. We could look for UB here
3356     // though.
3357     while (!I->isTerminator() && !isa<CallBase>(I)) {
3358       Change = ChangeStatus::CHANGED;
3359       I = I->getNextNode();
3360     }
3361 
3362     AliveSuccessors.clear();
3363 
3364     bool UsedAssumedInformation = false;
3365     switch (I->getOpcode()) {
3366     // TODO: look for (assumed) UB to backwards propagate "deadness".
3367     default:
3368       assert(I->isTerminator() &&
3369              "Expected non-terminators to be handled already!");
3370       for (const BasicBlock *SuccBB : successors(I->getParent()))
3371         AliveSuccessors.push_back(&SuccBB->front());
3372       break;
3373     case Instruction::Call:
3374       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3375                                                        *this, AliveSuccessors);
3376       break;
3377     case Instruction::Invoke:
3378       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3379                                                        *this, AliveSuccessors);
3380       break;
3381     case Instruction::Br:
3382       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3383                                                        *this, AliveSuccessors);
3384       break;
3385     case Instruction::Switch:
3386       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3387                                                        *this, AliveSuccessors);
3388       break;
3389     }
3390 
3391     if (UsedAssumedInformation) {
3392       NewToBeExploredFrom.insert(I);
3393     } else {
3394       Change = ChangeStatus::CHANGED;
3395       if (AliveSuccessors.empty() ||
3396           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3397         KnownDeadEnds.insert(I);
3398     }
3399 
3400     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3401                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3402                       << UsedAssumedInformation << "\n");
3403 
3404     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3405       if (!I->isTerminator()) {
3406         assert(AliveSuccessors.size() == 1 &&
3407                "Non-terminator expected to have a single successor!");
3408         Worklist.push_back(AliveSuccessor);
3409       } else {
3410         // record the assumed live edge
3411         AssumedLiveEdges.insert(
3412             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3413         if (assumeLive(A, *AliveSuccessor->getParent()))
3414           Worklist.push_back(AliveSuccessor);
3415       }
3416     }
3417   }
3418 
3419   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3420 
3421   // If we know everything is live there is no need to query for liveness.
3422   // Instead, indicating a pessimistic fixpoint will cause the state to be
3423   // "invalid" and all queries to be answered conservatively without lookups.
3424   // To be in this state we have to (1) finished the exploration and (3) not
3425   // discovered any non-trivial dead end and (2) not ruled unreachable code
3426   // dead.
3427   if (ToBeExploredFrom.empty() &&
3428       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3429       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3430         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3431       }))
3432     return indicatePessimisticFixpoint();
3433   return Change;
3434 }
3435 
3436 /// Liveness information for a call sites.
3437 struct AAIsDeadCallSite final : AAIsDeadFunction {
3438   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3439       : AAIsDeadFunction(IRP, A) {}
3440 
3441   /// See AbstractAttribute::initialize(...).
3442   void initialize(Attributor &A) override {
3443     // TODO: Once we have call site specific value information we can provide
3444     //       call site specific liveness information and then it makes
3445     //       sense to specialize attributes for call sites instead of
3446     //       redirecting requests to the callee.
3447     llvm_unreachable("Abstract attributes for liveness are not "
3448                      "supported for call sites yet!");
3449   }
3450 
3451   /// See AbstractAttribute::updateImpl(...).
3452   ChangeStatus updateImpl(Attributor &A) override {
3453     return indicatePessimisticFixpoint();
3454   }
3455 
3456   /// See AbstractAttribute::trackStatistics()
3457   void trackStatistics() const override {}
3458 };
3459 
3460 /// -------------------- Dereferenceable Argument Attribute --------------------
3461 
3462 template <>
3463 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3464                                                      const DerefState &R) {
3465   ChangeStatus CS0 =
3466       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3467   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3468   return CS0 | CS1;
3469 }
3470 
3471 struct AADereferenceableImpl : AADereferenceable {
3472   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3473       : AADereferenceable(IRP, A) {}
3474   using StateType = DerefState;
3475 
3476   /// See AbstractAttribute::initialize(...).
3477   void initialize(Attributor &A) override {
3478     SmallVector<Attribute, 4> Attrs;
3479     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3480              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3481     for (const Attribute &Attr : Attrs)
3482       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3483 
3484     const IRPosition &IRP = this->getIRPosition();
3485     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3486 
3487     bool CanBeNull, CanBeFreed;
3488     takeKnownDerefBytesMaximum(
3489         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3490             A.getDataLayout(), CanBeNull, CanBeFreed));
3491 
3492     bool IsFnInterface = IRP.isFnInterfaceKind();
3493     Function *FnScope = IRP.getAnchorScope();
3494     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3495       indicatePessimisticFixpoint();
3496       return;
3497     }
3498 
3499     if (Instruction *CtxI = getCtxI())
3500       followUsesInMBEC(*this, A, getState(), *CtxI);
3501   }
3502 
3503   /// See AbstractAttribute::getState()
3504   /// {
3505   StateType &getState() override { return *this; }
3506   const StateType &getState() const override { return *this; }
3507   /// }
3508 
3509   /// Helper function for collecting accessed bytes in must-be-executed-context
3510   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3511                               DerefState &State) {
3512     const Value *UseV = U->get();
3513     if (!UseV->getType()->isPointerTy())
3514       return;
3515 
3516     Type *PtrTy = UseV->getType();
3517     const DataLayout &DL = A.getDataLayout();
3518     int64_t Offset;
3519     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3520             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3521       if (Base == &getAssociatedValue() &&
3522           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3523         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3524         State.addAccessedBytes(Offset, Size);
3525       }
3526     }
3527   }
3528 
3529   /// See followUsesInMBEC
3530   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3531                        AADereferenceable::StateType &State) {
3532     bool IsNonNull = false;
3533     bool TrackUse = false;
3534     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3535         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3536     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3537                       << " for instruction " << *I << "\n");
3538 
3539     addAccessedBytesForUse(A, U, I, State);
3540     State.takeKnownDerefBytesMaximum(DerefBytes);
3541     return TrackUse;
3542   }
3543 
3544   /// See AbstractAttribute::manifest(...).
3545   ChangeStatus manifest(Attributor &A) override {
3546     ChangeStatus Change = AADereferenceable::manifest(A);
3547     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3548       removeAttrs({Attribute::DereferenceableOrNull});
3549       return ChangeStatus::CHANGED;
3550     }
3551     return Change;
3552   }
3553 
3554   void getDeducedAttributes(LLVMContext &Ctx,
3555                             SmallVectorImpl<Attribute> &Attrs) const override {
3556     // TODO: Add *_globally support
3557     if (isAssumedNonNull())
3558       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3559           Ctx, getAssumedDereferenceableBytes()));
3560     else
3561       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3562           Ctx, getAssumedDereferenceableBytes()));
3563   }
3564 
3565   /// See AbstractAttribute::getAsStr().
3566   const std::string getAsStr() const override {
3567     if (!getAssumedDereferenceableBytes())
3568       return "unknown-dereferenceable";
3569     return std::string("dereferenceable") +
3570            (isAssumedNonNull() ? "" : "_or_null") +
3571            (isAssumedGlobal() ? "_globally" : "") + "<" +
3572            std::to_string(getKnownDereferenceableBytes()) + "-" +
3573            std::to_string(getAssumedDereferenceableBytes()) + ">";
3574   }
3575 };
3576 
3577 /// Dereferenceable attribute for a floating value.
3578 struct AADereferenceableFloating : AADereferenceableImpl {
3579   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3580       : AADereferenceableImpl(IRP, A) {}
3581 
3582   /// See AbstractAttribute::updateImpl(...).
3583   ChangeStatus updateImpl(Attributor &A) override {
3584     const DataLayout &DL = A.getDataLayout();
3585 
3586     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3587                             bool Stripped) -> bool {
3588       unsigned IdxWidth =
3589           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3590       APInt Offset(IdxWidth, 0);
3591       const Value *Base =
3592           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3593 
3594       const auto &AA = A.getAAFor<AADereferenceable>(
3595           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3596       int64_t DerefBytes = 0;
3597       if (!Stripped && this == &AA) {
3598         // Use IR information if we did not strip anything.
3599         // TODO: track globally.
3600         bool CanBeNull, CanBeFreed;
3601         DerefBytes =
3602             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
3603         T.GlobalState.indicatePessimisticFixpoint();
3604       } else {
3605         const DerefState &DS = AA.getState();
3606         DerefBytes = DS.DerefBytesState.getAssumed();
3607         T.GlobalState &= DS.GlobalState;
3608       }
3609 
3610       // For now we do not try to "increase" dereferenceability due to negative
3611       // indices as we first have to come up with code to deal with loops and
3612       // for overflows of the dereferenceable bytes.
3613       int64_t OffsetSExt = Offset.getSExtValue();
3614       if (OffsetSExt < 0)
3615         OffsetSExt = 0;
3616 
3617       T.takeAssumedDerefBytesMinimum(
3618           std::max(int64_t(0), DerefBytes - OffsetSExt));
3619 
3620       if (this == &AA) {
3621         if (!Stripped) {
3622           // If nothing was stripped IR information is all we got.
3623           T.takeKnownDerefBytesMaximum(
3624               std::max(int64_t(0), DerefBytes - OffsetSExt));
3625           T.indicatePessimisticFixpoint();
3626         } else if (OffsetSExt > 0) {
3627           // If something was stripped but there is circular reasoning we look
3628           // for the offset. If it is positive we basically decrease the
3629           // dereferenceable bytes in a circluar loop now, which will simply
3630           // drive them down to the known value in a very slow way which we
3631           // can accelerate.
3632           T.indicatePessimisticFixpoint();
3633         }
3634       }
3635 
3636       return T.isValidState();
3637     };
3638 
3639     DerefState T;
3640     if (!genericValueTraversal<AADereferenceable, DerefState>(
3641             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3642       return indicatePessimisticFixpoint();
3643 
3644     return clampStateAndIndicateChange(getState(), T);
3645   }
3646 
3647   /// See AbstractAttribute::trackStatistics()
3648   void trackStatistics() const override {
3649     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3650   }
3651 };
3652 
3653 /// Dereferenceable attribute for a return value.
3654 struct AADereferenceableReturned final
3655     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3656   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3657       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3658             IRP, A) {}
3659 
3660   /// See AbstractAttribute::trackStatistics()
3661   void trackStatistics() const override {
3662     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3663   }
3664 };
3665 
3666 /// Dereferenceable attribute for an argument
3667 struct AADereferenceableArgument final
3668     : AAArgumentFromCallSiteArguments<AADereferenceable,
3669                                       AADereferenceableImpl> {
3670   using Base =
3671       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3672   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3673       : Base(IRP, A) {}
3674 
3675   /// See AbstractAttribute::trackStatistics()
3676   void trackStatistics() const override {
3677     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3678   }
3679 };
3680 
3681 /// Dereferenceable attribute for a call site argument.
3682 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3683   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3684       : AADereferenceableFloating(IRP, A) {}
3685 
3686   /// See AbstractAttribute::trackStatistics()
3687   void trackStatistics() const override {
3688     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3689   }
3690 };
3691 
3692 /// Dereferenceable attribute deduction for a call site return value.
3693 struct AADereferenceableCallSiteReturned final
3694     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3695   using Base =
3696       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3697   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3698       : Base(IRP, A) {}
3699 
3700   /// See AbstractAttribute::trackStatistics()
3701   void trackStatistics() const override {
3702     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3703   }
3704 };
3705 
3706 // ------------------------ Align Argument Attribute ------------------------
3707 
3708 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3709                                     Value &AssociatedValue, const Use *U,
3710                                     const Instruction *I, bool &TrackUse) {
3711   // We need to follow common pointer manipulation uses to the accesses they
3712   // feed into.
3713   if (isa<CastInst>(I)) {
3714     // Follow all but ptr2int casts.
3715     TrackUse = !isa<PtrToIntInst>(I);
3716     return 0;
3717   }
3718   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3719     if (GEP->hasAllConstantIndices())
3720       TrackUse = true;
3721     return 0;
3722   }
3723 
3724   MaybeAlign MA;
3725   if (const auto *CB = dyn_cast<CallBase>(I)) {
3726     if (CB->isBundleOperand(U) || CB->isCallee(U))
3727       return 0;
3728 
3729     unsigned ArgNo = CB->getArgOperandNo(U);
3730     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3731     // As long as we only use known information there is no need to track
3732     // dependences here.
3733     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3734     MA = MaybeAlign(AlignAA.getKnownAlign());
3735   }
3736 
3737   const DataLayout &DL = A.getDataLayout();
3738   const Value *UseV = U->get();
3739   if (auto *SI = dyn_cast<StoreInst>(I)) {
3740     if (SI->getPointerOperand() == UseV)
3741       MA = SI->getAlign();
3742   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3743     if (LI->getPointerOperand() == UseV)
3744       MA = LI->getAlign();
3745   }
3746 
3747   if (!MA || *MA <= QueryingAA.getKnownAlign())
3748     return 0;
3749 
3750   unsigned Alignment = MA->value();
3751   int64_t Offset;
3752 
3753   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3754     if (Base == &AssociatedValue) {
3755       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3756       // So we can say that the maximum power of two which is a divisor of
3757       // gcd(Offset, Alignment) is an alignment.
3758 
3759       uint32_t gcd =
3760           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3761       Alignment = llvm::PowerOf2Floor(gcd);
3762     }
3763   }
3764 
3765   return Alignment;
3766 }
3767 
3768 struct AAAlignImpl : AAAlign {
3769   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3770 
3771   /// See AbstractAttribute::initialize(...).
3772   void initialize(Attributor &A) override {
3773     SmallVector<Attribute, 4> Attrs;
3774     getAttrs({Attribute::Alignment}, Attrs);
3775     for (const Attribute &Attr : Attrs)
3776       takeKnownMaximum(Attr.getValueAsInt());
3777 
3778     Value &V = getAssociatedValue();
3779     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3780     //       use of the function pointer. This was caused by D73131. We want to
3781     //       avoid this for function pointers especially because we iterate
3782     //       their uses and int2ptr is not handled. It is not a correctness
3783     //       problem though!
3784     if (!V.getType()->getPointerElementType()->isFunctionTy())
3785       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3786 
3787     if (getIRPosition().isFnInterfaceKind() &&
3788         (!getAnchorScope() ||
3789          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3790       indicatePessimisticFixpoint();
3791       return;
3792     }
3793 
3794     if (Instruction *CtxI = getCtxI())
3795       followUsesInMBEC(*this, A, getState(), *CtxI);
3796   }
3797 
3798   /// See AbstractAttribute::manifest(...).
3799   ChangeStatus manifest(Attributor &A) override {
3800     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3801 
3802     // Check for users that allow alignment annotations.
3803     Value &AssociatedValue = getAssociatedValue();
3804     for (const Use &U : AssociatedValue.uses()) {
3805       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3806         if (SI->getPointerOperand() == &AssociatedValue)
3807           if (SI->getAlignment() < getAssumedAlign()) {
3808             STATS_DECLTRACK(AAAlign, Store,
3809                             "Number of times alignment added to a store");
3810             SI->setAlignment(Align(getAssumedAlign()));
3811             LoadStoreChanged = ChangeStatus::CHANGED;
3812           }
3813       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3814         if (LI->getPointerOperand() == &AssociatedValue)
3815           if (LI->getAlignment() < getAssumedAlign()) {
3816             LI->setAlignment(Align(getAssumedAlign()));
3817             STATS_DECLTRACK(AAAlign, Load,
3818                             "Number of times alignment added to a load");
3819             LoadStoreChanged = ChangeStatus::CHANGED;
3820           }
3821       }
3822     }
3823 
3824     ChangeStatus Changed = AAAlign::manifest(A);
3825 
3826     Align InheritAlign =
3827         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3828     if (InheritAlign >= getAssumedAlign())
3829       return LoadStoreChanged;
3830     return Changed | LoadStoreChanged;
3831   }
3832 
3833   // TODO: Provide a helper to determine the implied ABI alignment and check in
3834   //       the existing manifest method and a new one for AAAlignImpl that value
3835   //       to avoid making the alignment explicit if it did not improve.
3836 
3837   /// See AbstractAttribute::getDeducedAttributes
3838   virtual void
3839   getDeducedAttributes(LLVMContext &Ctx,
3840                        SmallVectorImpl<Attribute> &Attrs) const override {
3841     if (getAssumedAlign() > 1)
3842       Attrs.emplace_back(
3843           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3844   }
3845 
3846   /// See followUsesInMBEC
3847   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3848                        AAAlign::StateType &State) {
3849     bool TrackUse = false;
3850 
3851     unsigned int KnownAlign =
3852         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3853     State.takeKnownMaximum(KnownAlign);
3854 
3855     return TrackUse;
3856   }
3857 
3858   /// See AbstractAttribute::getAsStr().
3859   const std::string getAsStr() const override {
3860     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3861                                 "-" + std::to_string(getAssumedAlign()) + ">")
3862                              : "unknown-align";
3863   }
3864 };
3865 
3866 /// Align attribute for a floating value.
3867 struct AAAlignFloating : AAAlignImpl {
3868   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3869 
3870   /// See AbstractAttribute::updateImpl(...).
3871   ChangeStatus updateImpl(Attributor &A) override {
3872     const DataLayout &DL = A.getDataLayout();
3873 
3874     auto VisitValueCB = [&](Value &V, const Instruction *,
3875                             AAAlign::StateType &T, bool Stripped) -> bool {
3876       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3877                                            DepClassTy::REQUIRED);
3878       if (!Stripped && this == &AA) {
3879         int64_t Offset;
3880         unsigned Alignment = 1;
3881         if (const Value *Base =
3882                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3883           Align PA = Base->getPointerAlignment(DL);
3884           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3885           // So we can say that the maximum power of two which is a divisor of
3886           // gcd(Offset, Alignment) is an alignment.
3887 
3888           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3889                                                uint32_t(PA.value()));
3890           Alignment = llvm::PowerOf2Floor(gcd);
3891         } else {
3892           Alignment = V.getPointerAlignment(DL).value();
3893         }
3894         // Use only IR information if we did not strip anything.
3895         T.takeKnownMaximum(Alignment);
3896         T.indicatePessimisticFixpoint();
3897       } else {
3898         // Use abstract attribute information.
3899         const AAAlign::StateType &DS = AA.getState();
3900         T ^= DS;
3901       }
3902       return T.isValidState();
3903     };
3904 
3905     StateType T;
3906     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3907                                                    VisitValueCB, getCtxI()))
3908       return indicatePessimisticFixpoint();
3909 
3910     // TODO: If we know we visited all incoming values, thus no are assumed
3911     // dead, we can take the known information from the state T.
3912     return clampStateAndIndicateChange(getState(), T);
3913   }
3914 
3915   /// See AbstractAttribute::trackStatistics()
3916   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3917 };
3918 
3919 /// Align attribute for function return value.
3920 struct AAAlignReturned final
3921     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3922   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3923   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3924 
3925   /// See AbstractAttribute::initialize(...).
3926   void initialize(Attributor &A) override {
3927     Base::initialize(A);
3928     Function *F = getAssociatedFunction();
3929     if (!F || F->isDeclaration())
3930       indicatePessimisticFixpoint();
3931   }
3932 
3933   /// See AbstractAttribute::trackStatistics()
3934   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3935 };
3936 
3937 /// Align attribute for function argument.
3938 struct AAAlignArgument final
3939     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3940   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3941   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3942 
3943   /// See AbstractAttribute::manifest(...).
3944   ChangeStatus manifest(Attributor &A) override {
3945     // If the associated argument is involved in a must-tail call we give up
3946     // because we would need to keep the argument alignments of caller and
3947     // callee in-sync. Just does not seem worth the trouble right now.
3948     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3949       return ChangeStatus::UNCHANGED;
3950     return Base::manifest(A);
3951   }
3952 
3953   /// See AbstractAttribute::trackStatistics()
3954   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3955 };
3956 
3957 struct AAAlignCallSiteArgument final : AAAlignFloating {
3958   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3959       : AAAlignFloating(IRP, A) {}
3960 
3961   /// See AbstractAttribute::manifest(...).
3962   ChangeStatus manifest(Attributor &A) override {
3963     // If the associated argument is involved in a must-tail call we give up
3964     // because we would need to keep the argument alignments of caller and
3965     // callee in-sync. Just does not seem worth the trouble right now.
3966     if (Argument *Arg = getAssociatedArgument())
3967       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3968         return ChangeStatus::UNCHANGED;
3969     ChangeStatus Changed = AAAlignImpl::manifest(A);
3970     Align InheritAlign =
3971         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3972     if (InheritAlign >= getAssumedAlign())
3973       Changed = ChangeStatus::UNCHANGED;
3974     return Changed;
3975   }
3976 
3977   /// See AbstractAttribute::updateImpl(Attributor &A).
3978   ChangeStatus updateImpl(Attributor &A) override {
3979     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3980     if (Argument *Arg = getAssociatedArgument()) {
3981       // We only take known information from the argument
3982       // so we do not need to track a dependence.
3983       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3984           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
3985       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3986     }
3987     return Changed;
3988   }
3989 
3990   /// See AbstractAttribute::trackStatistics()
3991   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3992 };
3993 
3994 /// Align attribute deduction for a call site return value.
3995 struct AAAlignCallSiteReturned final
3996     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3997   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3998   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3999       : Base(IRP, A) {}
4000 
4001   /// See AbstractAttribute::initialize(...).
4002   void initialize(Attributor &A) override {
4003     Base::initialize(A);
4004     Function *F = getAssociatedFunction();
4005     if (!F || F->isDeclaration())
4006       indicatePessimisticFixpoint();
4007   }
4008 
4009   /// See AbstractAttribute::trackStatistics()
4010   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4011 };
4012 
4013 /// ------------------ Function No-Return Attribute ----------------------------
4014 struct AANoReturnImpl : public AANoReturn {
4015   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4016 
4017   /// See AbstractAttribute::initialize(...).
4018   void initialize(Attributor &A) override {
4019     AANoReturn::initialize(A);
4020     Function *F = getAssociatedFunction();
4021     if (!F || F->isDeclaration())
4022       indicatePessimisticFixpoint();
4023   }
4024 
4025   /// See AbstractAttribute::getAsStr().
4026   const std::string getAsStr() const override {
4027     return getAssumed() ? "noreturn" : "may-return";
4028   }
4029 
4030   /// See AbstractAttribute::updateImpl(Attributor &A).
4031   virtual ChangeStatus updateImpl(Attributor &A) override {
4032     auto CheckForNoReturn = [](Instruction &) { return false; };
4033     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4034                                    {(unsigned)Instruction::Ret}))
4035       return indicatePessimisticFixpoint();
4036     return ChangeStatus::UNCHANGED;
4037   }
4038 };
4039 
4040 struct AANoReturnFunction final : AANoReturnImpl {
4041   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4042       : AANoReturnImpl(IRP, A) {}
4043 
4044   /// See AbstractAttribute::trackStatistics()
4045   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4046 };
4047 
4048 /// NoReturn attribute deduction for a call sites.
4049 struct AANoReturnCallSite final : AANoReturnImpl {
4050   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4051       : AANoReturnImpl(IRP, A) {}
4052 
4053   /// See AbstractAttribute::initialize(...).
4054   void initialize(Attributor &A) override {
4055     AANoReturnImpl::initialize(A);
4056     if (Function *F = getAssociatedFunction()) {
4057       const IRPosition &FnPos = IRPosition::function(*F);
4058       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4059       if (!FnAA.isAssumedNoReturn())
4060         indicatePessimisticFixpoint();
4061     }
4062   }
4063 
4064   /// See AbstractAttribute::updateImpl(...).
4065   ChangeStatus updateImpl(Attributor &A) override {
4066     // TODO: Once we have call site specific value information we can provide
4067     //       call site specific liveness information and then it makes
4068     //       sense to specialize attributes for call sites arguments instead of
4069     //       redirecting requests to the callee argument.
4070     Function *F = getAssociatedFunction();
4071     const IRPosition &FnPos = IRPosition::function(*F);
4072     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4073     return clampStateAndIndicateChange(getState(), FnAA.getState());
4074   }
4075 
4076   /// See AbstractAttribute::trackStatistics()
4077   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4078 };
4079 
4080 /// ----------------------- Variable Capturing ---------------------------------
4081 
4082 /// A class to hold the state of for no-capture attributes.
4083 struct AANoCaptureImpl : public AANoCapture {
4084   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4085 
4086   /// See AbstractAttribute::initialize(...).
4087   void initialize(Attributor &A) override {
4088     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4089       indicateOptimisticFixpoint();
4090       return;
4091     }
4092     Function *AnchorScope = getAnchorScope();
4093     if (isFnInterfaceKind() &&
4094         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4095       indicatePessimisticFixpoint();
4096       return;
4097     }
4098 
4099     // You cannot "capture" null in the default address space.
4100     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4101         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4102       indicateOptimisticFixpoint();
4103       return;
4104     }
4105 
4106     const Function *F =
4107         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4108 
4109     // Check what state the associated function can actually capture.
4110     if (F)
4111       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4112     else
4113       indicatePessimisticFixpoint();
4114   }
4115 
4116   /// See AbstractAttribute::updateImpl(...).
4117   ChangeStatus updateImpl(Attributor &A) override;
4118 
4119   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4120   virtual void
4121   getDeducedAttributes(LLVMContext &Ctx,
4122                        SmallVectorImpl<Attribute> &Attrs) const override {
4123     if (!isAssumedNoCaptureMaybeReturned())
4124       return;
4125 
4126     if (isArgumentPosition()) {
4127       if (isAssumedNoCapture())
4128         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4129       else if (ManifestInternal)
4130         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4131     }
4132   }
4133 
4134   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4135   /// depending on the ability of the function associated with \p IRP to capture
4136   /// state in memory and through "returning/throwing", respectively.
4137   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4138                                                    const Function &F,
4139                                                    BitIntegerState &State) {
4140     // TODO: Once we have memory behavior attributes we should use them here.
4141 
4142     // If we know we cannot communicate or write to memory, we do not care about
4143     // ptr2int anymore.
4144     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4145         F.getReturnType()->isVoidTy()) {
4146       State.addKnownBits(NO_CAPTURE);
4147       return;
4148     }
4149 
4150     // A function cannot capture state in memory if it only reads memory, it can
4151     // however return/throw state and the state might be influenced by the
4152     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4153     if (F.onlyReadsMemory())
4154       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4155 
4156     // A function cannot communicate state back if it does not through
4157     // exceptions and doesn not return values.
4158     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4159       State.addKnownBits(NOT_CAPTURED_IN_RET);
4160 
4161     // Check existing "returned" attributes.
4162     int ArgNo = IRP.getCalleeArgNo();
4163     if (F.doesNotThrow() && ArgNo >= 0) {
4164       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4165         if (F.hasParamAttribute(u, Attribute::Returned)) {
4166           if (u == unsigned(ArgNo))
4167             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4168           else if (F.onlyReadsMemory())
4169             State.addKnownBits(NO_CAPTURE);
4170           else
4171             State.addKnownBits(NOT_CAPTURED_IN_RET);
4172           break;
4173         }
4174     }
4175   }
4176 
4177   /// See AbstractState::getAsStr().
4178   const std::string getAsStr() const override {
4179     if (isKnownNoCapture())
4180       return "known not-captured";
4181     if (isAssumedNoCapture())
4182       return "assumed not-captured";
4183     if (isKnownNoCaptureMaybeReturned())
4184       return "known not-captured-maybe-returned";
4185     if (isAssumedNoCaptureMaybeReturned())
4186       return "assumed not-captured-maybe-returned";
4187     return "assumed-captured";
4188   }
4189 };
4190 
4191 /// Attributor-aware capture tracker.
4192 struct AACaptureUseTracker final : public CaptureTracker {
4193 
4194   /// Create a capture tracker that can lookup in-flight abstract attributes
4195   /// through the Attributor \p A.
4196   ///
4197   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4198   /// search is stopped. If a use leads to a return instruction,
4199   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4200   /// If a use leads to a ptr2int which may capture the value,
4201   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4202   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4203   /// set. All values in \p PotentialCopies are later tracked as well. For every
4204   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4205   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4206   /// conservatively set to true.
4207   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4208                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4209                       SmallVectorImpl<const Value *> &PotentialCopies,
4210                       unsigned &RemainingUsesToExplore)
4211       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4212         PotentialCopies(PotentialCopies),
4213         RemainingUsesToExplore(RemainingUsesToExplore) {}
4214 
4215   /// Determine if \p V maybe captured. *Also updates the state!*
4216   bool valueMayBeCaptured(const Value *V) {
4217     if (V->getType()->isPointerTy()) {
4218       PointerMayBeCaptured(V, this);
4219     } else {
4220       State.indicatePessimisticFixpoint();
4221     }
4222     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4223   }
4224 
4225   /// See CaptureTracker::tooManyUses().
4226   void tooManyUses() override {
4227     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4228   }
4229 
4230   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4231     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4232       return true;
4233     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4234         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4235     return DerefAA.getAssumedDereferenceableBytes();
4236   }
4237 
4238   /// See CaptureTracker::captured(...).
4239   bool captured(const Use *U) override {
4240     Instruction *UInst = cast<Instruction>(U->getUser());
4241     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4242                       << "\n");
4243 
4244     // Because we may reuse the tracker multiple times we keep track of the
4245     // number of explored uses ourselves as well.
4246     if (RemainingUsesToExplore-- == 0) {
4247       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4248       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4249                           /* Return */ true);
4250     }
4251 
4252     // Deal with ptr2int by following uses.
4253     if (isa<PtrToIntInst>(UInst)) {
4254       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4255       return valueMayBeCaptured(UInst);
4256     }
4257 
4258     // Explicitly catch return instructions.
4259     if (isa<ReturnInst>(UInst))
4260       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4261                           /* Return */ true);
4262 
4263     // For now we only use special logic for call sites. However, the tracker
4264     // itself knows about a lot of other non-capturing cases already.
4265     auto *CB = dyn_cast<CallBase>(UInst);
4266     if (!CB || !CB->isArgOperand(U))
4267       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4268                           /* Return */ true);
4269 
4270     unsigned ArgNo = CB->getArgOperandNo(U);
4271     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4272     // If we have a abstract no-capture attribute for the argument we can use
4273     // it to justify a non-capture attribute here. This allows recursion!
4274     auto &ArgNoCaptureAA =
4275         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4276     if (ArgNoCaptureAA.isAssumedNoCapture())
4277       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4278                           /* Return */ false);
4279     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4280       addPotentialCopy(*CB);
4281       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4282                           /* Return */ false);
4283     }
4284 
4285     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4286     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4287                         /* Return */ true);
4288   }
4289 
4290   /// Register \p CS as potential copy of the value we are checking.
4291   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4292 
4293   /// See CaptureTracker::shouldExplore(...).
4294   bool shouldExplore(const Use *U) override {
4295     // Check liveness and ignore droppable users.
4296     return !U->getUser()->isDroppable() &&
4297            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4298   }
4299 
4300   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4301   /// \p CapturedInRet, then return the appropriate value for use in the
4302   /// CaptureTracker::captured() interface.
4303   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4304                     bool CapturedInRet) {
4305     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4306                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4307     if (CapturedInMem)
4308       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4309     if (CapturedInInt)
4310       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4311     if (CapturedInRet)
4312       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4313     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4314   }
4315 
4316 private:
4317   /// The attributor providing in-flight abstract attributes.
4318   Attributor &A;
4319 
4320   /// The abstract attribute currently updated.
4321   AANoCapture &NoCaptureAA;
4322 
4323   /// The abstract liveness state.
4324   const AAIsDead &IsDeadAA;
4325 
4326   /// The state currently updated.
4327   AANoCapture::StateType &State;
4328 
4329   /// Set of potential copies of the tracked value.
4330   SmallVectorImpl<const Value *> &PotentialCopies;
4331 
4332   /// Global counter to limit the number of explored uses.
4333   unsigned &RemainingUsesToExplore;
4334 };
4335 
4336 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4337   const IRPosition &IRP = getIRPosition();
4338   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4339                                         : &IRP.getAssociatedValue();
4340   if (!V)
4341     return indicatePessimisticFixpoint();
4342 
4343   const Function *F =
4344       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4345   assert(F && "Expected a function!");
4346   const IRPosition &FnPos = IRPosition::function(*F);
4347   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4348 
4349   AANoCapture::StateType T;
4350 
4351   // Readonly means we cannot capture through memory.
4352   const auto &FnMemAA =
4353       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4354   if (FnMemAA.isAssumedReadOnly()) {
4355     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4356     if (FnMemAA.isKnownReadOnly())
4357       addKnownBits(NOT_CAPTURED_IN_MEM);
4358     else
4359       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4360   }
4361 
4362   // Make sure all returned values are different than the underlying value.
4363   // TODO: we could do this in a more sophisticated way inside
4364   //       AAReturnedValues, e.g., track all values that escape through returns
4365   //       directly somehow.
4366   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4367     bool SeenConstant = false;
4368     for (auto &It : RVAA.returned_values()) {
4369       if (isa<Constant>(It.first)) {
4370         if (SeenConstant)
4371           return false;
4372         SeenConstant = true;
4373       } else if (!isa<Argument>(It.first) ||
4374                  It.first == getAssociatedArgument())
4375         return false;
4376     }
4377     return true;
4378   };
4379 
4380   const auto &NoUnwindAA =
4381       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4382   if (NoUnwindAA.isAssumedNoUnwind()) {
4383     bool IsVoidTy = F->getReturnType()->isVoidTy();
4384     const AAReturnedValues *RVAA =
4385         IsVoidTy ? nullptr
4386                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4387 
4388                                                  DepClassTy::OPTIONAL);
4389     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4390       T.addKnownBits(NOT_CAPTURED_IN_RET);
4391       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4392         return ChangeStatus::UNCHANGED;
4393       if (NoUnwindAA.isKnownNoUnwind() &&
4394           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4395         addKnownBits(NOT_CAPTURED_IN_RET);
4396         if (isKnown(NOT_CAPTURED_IN_MEM))
4397           return indicateOptimisticFixpoint();
4398       }
4399     }
4400   }
4401 
4402   // Use the CaptureTracker interface and logic with the specialized tracker,
4403   // defined in AACaptureUseTracker, that can look at in-flight abstract
4404   // attributes and directly updates the assumed state.
4405   SmallVector<const Value *, 4> PotentialCopies;
4406   unsigned RemainingUsesToExplore =
4407       getDefaultMaxUsesToExploreForCaptureTracking();
4408   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4409                               RemainingUsesToExplore);
4410 
4411   // Check all potential copies of the associated value until we can assume
4412   // none will be captured or we have to assume at least one might be.
4413   unsigned Idx = 0;
4414   PotentialCopies.push_back(V);
4415   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4416     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4417 
4418   AANoCapture::StateType &S = getState();
4419   auto Assumed = S.getAssumed();
4420   S.intersectAssumedBits(T.getAssumed());
4421   if (!isAssumedNoCaptureMaybeReturned())
4422     return indicatePessimisticFixpoint();
4423   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4424                                    : ChangeStatus::CHANGED;
4425 }
4426 
4427 /// NoCapture attribute for function arguments.
4428 struct AANoCaptureArgument final : AANoCaptureImpl {
4429   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4430       : AANoCaptureImpl(IRP, A) {}
4431 
4432   /// See AbstractAttribute::trackStatistics()
4433   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4434 };
4435 
4436 /// NoCapture attribute for call site arguments.
4437 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4438   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4439       : AANoCaptureImpl(IRP, A) {}
4440 
4441   /// See AbstractAttribute::initialize(...).
4442   void initialize(Attributor &A) override {
4443     if (Argument *Arg = getAssociatedArgument())
4444       if (Arg->hasByValAttr())
4445         indicateOptimisticFixpoint();
4446     AANoCaptureImpl::initialize(A);
4447   }
4448 
4449   /// See AbstractAttribute::updateImpl(...).
4450   ChangeStatus updateImpl(Attributor &A) override {
4451     // TODO: Once we have call site specific value information we can provide
4452     //       call site specific liveness information and then it makes
4453     //       sense to specialize attributes for call sites arguments instead of
4454     //       redirecting requests to the callee argument.
4455     Argument *Arg = getAssociatedArgument();
4456     if (!Arg)
4457       return indicatePessimisticFixpoint();
4458     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4459     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4460     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4461   }
4462 
4463   /// See AbstractAttribute::trackStatistics()
4464   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4465 };
4466 
4467 /// NoCapture attribute for floating values.
4468 struct AANoCaptureFloating final : AANoCaptureImpl {
4469   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4470       : AANoCaptureImpl(IRP, A) {}
4471 
4472   /// See AbstractAttribute::trackStatistics()
4473   void trackStatistics() const override {
4474     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4475   }
4476 };
4477 
4478 /// NoCapture attribute for function return value.
4479 struct AANoCaptureReturned final : AANoCaptureImpl {
4480   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4481       : AANoCaptureImpl(IRP, A) {
4482     llvm_unreachable("NoCapture is not applicable to function returns!");
4483   }
4484 
4485   /// See AbstractAttribute::initialize(...).
4486   void initialize(Attributor &A) override {
4487     llvm_unreachable("NoCapture is not applicable to function returns!");
4488   }
4489 
4490   /// See AbstractAttribute::updateImpl(...).
4491   ChangeStatus updateImpl(Attributor &A) override {
4492     llvm_unreachable("NoCapture is not applicable to function returns!");
4493   }
4494 
4495   /// See AbstractAttribute::trackStatistics()
4496   void trackStatistics() const override {}
4497 };
4498 
4499 /// NoCapture attribute deduction for a call site return value.
4500 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4501   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4502       : AANoCaptureImpl(IRP, A) {}
4503 
4504   /// See AbstractAttribute::initialize(...).
4505   void initialize(Attributor &A) override {
4506     const Function *F = getAnchorScope();
4507     // Check what state the associated function can actually capture.
4508     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4509   }
4510 
4511   /// See AbstractAttribute::trackStatistics()
4512   void trackStatistics() const override {
4513     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4514   }
4515 };
4516 
4517 /// ------------------ Value Simplify Attribute ----------------------------
4518 struct AAValueSimplifyImpl : AAValueSimplify {
4519   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4520       : AAValueSimplify(IRP, A) {}
4521 
4522   /// See AbstractAttribute::initialize(...).
4523   void initialize(Attributor &A) override {
4524     if (getAssociatedValue().getType()->isVoidTy())
4525       indicatePessimisticFixpoint();
4526   }
4527 
4528   /// See AbstractAttribute::getAsStr().
4529   const std::string getAsStr() const override {
4530     LLVM_DEBUG({
4531       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
4532       if (SimplifiedAssociatedValue)
4533         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
4534     });
4535     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4536                         : "not-simple";
4537   }
4538 
4539   /// See AbstractAttribute::trackStatistics()
4540   void trackStatistics() const override {}
4541 
4542   /// See AAValueSimplify::getAssumedSimplifiedValue()
4543   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4544     if (!getAssumed())
4545       return const_cast<Value *>(&getAssociatedValue());
4546     return SimplifiedAssociatedValue;
4547   }
4548 
4549   /// Helper function for querying AAValueSimplify and updating candicate.
4550   /// \param QueryingValue Value trying to unify with SimplifiedValue
4551   /// \param AccumulatedSimplifiedValue Current simplification result.
4552   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4553                              Value &QueryingValue,
4554                              Optional<Value *> &AccumulatedSimplifiedValue) {
4555     // FIXME: Add a typecast support.
4556 
4557     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4558         QueryingAA,
4559         IRPosition::value(QueryingValue, QueryingAA.getCallBaseContext()),
4560         DepClassTy::REQUIRED);
4561 
4562     Optional<Value *> QueryingValueSimplified =
4563         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4564 
4565     if (!QueryingValueSimplified.hasValue())
4566       return true;
4567 
4568     if (!QueryingValueSimplified.getValue())
4569       return false;
4570 
4571     Value &QueryingValueSimplifiedUnwrapped =
4572         *QueryingValueSimplified.getValue();
4573 
4574     if (AccumulatedSimplifiedValue.hasValue() &&
4575         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4576         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4577       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4578     if (AccumulatedSimplifiedValue.hasValue() &&
4579         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4580       return true;
4581 
4582     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4583                       << " is assumed to be "
4584                       << QueryingValueSimplifiedUnwrapped << "\n");
4585 
4586     AccumulatedSimplifiedValue = QueryingValueSimplified;
4587     return true;
4588   }
4589 
4590   /// Returns a candidate is found or not
4591   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4592     if (!getAssociatedValue().getType()->isIntegerTy())
4593       return false;
4594 
4595     // This will also pass the call base context.
4596     const auto &AA =
4597         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4598 
4599     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4600 
4601     if (!COpt.hasValue()) {
4602       SimplifiedAssociatedValue = llvm::None;
4603       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4604       return true;
4605     }
4606     if (auto *C = COpt.getValue()) {
4607       SimplifiedAssociatedValue = C;
4608       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4609       return true;
4610     }
4611     return false;
4612   }
4613 
4614   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4615     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4616       return true;
4617     if (askSimplifiedValueFor<AAPotentialValues>(A))
4618       return true;
4619     return false;
4620   }
4621 
4622   /// See AbstractAttribute::manifest(...).
4623   ChangeStatus manifest(Attributor &A) override {
4624     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4625 
4626     if (SimplifiedAssociatedValue.hasValue() &&
4627         !SimplifiedAssociatedValue.getValue())
4628       return Changed;
4629 
4630     Value &V = getAssociatedValue();
4631     auto *C = SimplifiedAssociatedValue.hasValue()
4632                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4633                   : UndefValue::get(V.getType());
4634     if (C && C != &V) {
4635       Value *NewV = AA::getWithType(*C, *V.getType());
4636       // We can replace the AssociatedValue with the constant.
4637       if (!V.user_empty() && &V != C && NewV) {
4638         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4639                           << " :: " << *this << "\n");
4640         if (A.changeValueAfterManifest(V, *NewV))
4641           Changed = ChangeStatus::CHANGED;
4642       }
4643     }
4644 
4645     return Changed | AAValueSimplify::manifest(A);
4646   }
4647 
4648   /// See AbstractState::indicatePessimisticFixpoint(...).
4649   ChangeStatus indicatePessimisticFixpoint() override {
4650     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4651     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4652     SimplifiedAssociatedValue = &getAssociatedValue();
4653     indicateOptimisticFixpoint();
4654     return ChangeStatus::CHANGED;
4655   }
4656 
4657 protected:
4658   // An assumed simplified value. Initially, it is set to Optional::None, which
4659   // means that the value is not clear under current assumption. If in the
4660   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4661   // returns orignal associated value.
4662   Optional<Value *> SimplifiedAssociatedValue;
4663 };
4664 
4665 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4666   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4667       : AAValueSimplifyImpl(IRP, A) {}
4668 
4669   void initialize(Attributor &A) override {
4670     AAValueSimplifyImpl::initialize(A);
4671     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4672       indicatePessimisticFixpoint();
4673     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4674                  Attribute::StructRet, Attribute::Nest},
4675                 /* IgnoreSubsumingPositions */ true))
4676       indicatePessimisticFixpoint();
4677 
4678     // FIXME: This is a hack to prevent us from propagating function poiner in
4679     // the new pass manager CGSCC pass as it creates call edges the
4680     // CallGraphUpdater cannot handle yet.
4681     Value &V = getAssociatedValue();
4682     if (V.getType()->isPointerTy() &&
4683         V.getType()->getPointerElementType()->isFunctionTy() &&
4684         !A.isModulePass())
4685       indicatePessimisticFixpoint();
4686   }
4687 
4688   /// See AbstractAttribute::updateImpl(...).
4689   ChangeStatus updateImpl(Attributor &A) override {
4690     // Byval is only replacable if it is readonly otherwise we would write into
4691     // the replaced value and not the copy that byval creates implicitly.
4692     Argument *Arg = getAssociatedArgument();
4693     if (Arg->hasByValAttr()) {
4694       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4695       //       there is no race by not copying a constant byval.
4696       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4697                                                        DepClassTy::REQUIRED);
4698       if (!MemAA.isAssumedReadOnly())
4699         return indicatePessimisticFixpoint();
4700     }
4701 
4702     auto Before = SimplifiedAssociatedValue;
4703 
4704     auto PredForCallSite = [&](AbstractCallSite ACS) {
4705       const IRPosition &ACSArgPos =
4706           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4707       // Check if a coresponding argument was found or if it is on not
4708       // associated (which can happen for callback calls).
4709       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4710         return false;
4711 
4712       // We can only propagate thread independent values through callbacks.
4713       // This is different to direct/indirect call sites because for them we
4714       // know the thread executing the caller and callee is the same. For
4715       // callbacks this is not guaranteed, thus a thread dependent value could
4716       // be different for the caller and callee, making it invalid to propagate.
4717       Value &ArgOp = ACSArgPos.getAssociatedValue();
4718       if (ACS.isCallbackCall())
4719         if (auto *C = dyn_cast<Constant>(&ArgOp))
4720           if (C->isThreadDependent())
4721             return false;
4722       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4723     };
4724 
4725     // Generate a answer specific to a call site context.
4726     bool Success;
4727     bool AllCallSitesKnown;
4728     if (hasCallBaseContext())
4729       Success = PredForCallSite(
4730           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
4731     else
4732       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
4733                                        AllCallSitesKnown);
4734 
4735     if (!Success)
4736       if (!askSimplifiedValueForOtherAAs(A))
4737         return indicatePessimisticFixpoint();
4738 
4739     // If a candicate was found in this update, return CHANGED.
4740     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4741                                                : ChangeStatus ::CHANGED;
4742   }
4743 
4744   /// See AbstractAttribute::trackStatistics()
4745   void trackStatistics() const override {
4746     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4747   }
4748 };
4749 
4750 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4751   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4752       : AAValueSimplifyImpl(IRP, A) {}
4753 
4754   /// See AbstractAttribute::updateImpl(...).
4755   ChangeStatus updateImpl(Attributor &A) override {
4756     auto Before = SimplifiedAssociatedValue;
4757 
4758     auto PredForReturned = [&](Value &V) {
4759       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4760     };
4761 
4762     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4763       if (!askSimplifiedValueForOtherAAs(A))
4764         return indicatePessimisticFixpoint();
4765 
4766     // If a candicate was found in this update, return CHANGED.
4767     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4768                                                : ChangeStatus ::CHANGED;
4769   }
4770 
4771   ChangeStatus manifest(Attributor &A) override {
4772     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4773 
4774     if (SimplifiedAssociatedValue.hasValue() &&
4775         !SimplifiedAssociatedValue.getValue())
4776       return Changed;
4777 
4778     Value &V = getAssociatedValue();
4779     auto *C = SimplifiedAssociatedValue.hasValue()
4780                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4781                   : UndefValue::get(V.getType());
4782     if (C && C != &V) {
4783       auto PredForReturned =
4784           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4785             // We can replace the AssociatedValue with the constant.
4786             if (&V == C || isa<UndefValue>(V))
4787               return true;
4788 
4789             for (ReturnInst *RI : RetInsts) {
4790               if (RI->getFunction() != getAnchorScope())
4791                 continue;
4792               Value *NewV =
4793                   AA::getWithType(*C, *RI->getReturnValue()->getType());
4794               if (!NewV)
4795                 continue;
4796               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4797                                 << " in " << *RI << " :: " << *this << "\n");
4798               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
4799                 Changed = ChangeStatus::CHANGED;
4800             }
4801             return true;
4802           };
4803       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4804     }
4805 
4806     return Changed | AAValueSimplify::manifest(A);
4807   }
4808 
4809   /// See AbstractAttribute::trackStatistics()
4810   void trackStatistics() const override {
4811     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4812   }
4813 };
4814 
4815 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4816   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4817       : AAValueSimplifyImpl(IRP, A) {}
4818 
4819   /// See AbstractAttribute::initialize(...).
4820   void initialize(Attributor &A) override {
4821     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4822     //        Needs investigation.
4823     // AAValueSimplifyImpl::initialize(A);
4824     Value &V = getAnchorValue();
4825 
4826     // TODO: add other stuffs
4827     if (isa<Constant>(V))
4828       indicatePessimisticFixpoint();
4829   }
4830 
4831   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4832   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4833   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4834   /// updated and \p Changed is set appropriately.
4835   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4836                               ChangeStatus &Changed) {
4837     if (!ICmp)
4838       return false;
4839     if (!ICmp->isEquality())
4840       return false;
4841 
4842     // This is a comparison with == or !-. We check for nullptr now.
4843     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4844     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4845     if (!Op0IsNull && !Op1IsNull)
4846       return false;
4847 
4848     LLVMContext &Ctx = ICmp->getContext();
4849     // Check for `nullptr ==/!= nullptr` first:
4850     if (Op0IsNull && Op1IsNull) {
4851       Value *NewVal = ConstantInt::get(
4852           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4853       assert(!SimplifiedAssociatedValue.hasValue() &&
4854              "Did not expect non-fixed value for constant comparison");
4855       SimplifiedAssociatedValue = NewVal;
4856       indicateOptimisticFixpoint();
4857       Changed = ChangeStatus::CHANGED;
4858       return true;
4859     }
4860 
4861     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
4862     // non-nullptr operand and if we assume it's non-null we can conclude the
4863     // result of the comparison.
4864     assert((Op0IsNull || Op1IsNull) &&
4865            "Expected nullptr versus non-nullptr comparison at this point");
4866 
4867     // The index is the operand that we assume is not null.
4868     unsigned PtrIdx = Op0IsNull;
4869     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4870         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4871         DepClassTy::REQUIRED);
4872     if (!PtrNonNullAA.isAssumedNonNull())
4873       return false;
4874 
4875     // The new value depends on the predicate, true for != and false for ==.
4876     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4877                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4878 
4879     assert((!SimplifiedAssociatedValue.hasValue() ||
4880             SimplifiedAssociatedValue == NewVal) &&
4881            "Did not expect to change value for zero-comparison");
4882 
4883     auto Before = SimplifiedAssociatedValue;
4884     SimplifiedAssociatedValue = NewVal;
4885 
4886     if (PtrNonNullAA.isKnownNonNull())
4887       indicateOptimisticFixpoint();
4888 
4889     Changed = Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4890                                                   : ChangeStatus ::CHANGED;
4891     return true;
4892   }
4893 
4894   /// See AbstractAttribute::updateImpl(...).
4895   ChangeStatus updateImpl(Attributor &A) override {
4896     auto Before = SimplifiedAssociatedValue;
4897 
4898     ChangeStatus Changed;
4899     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4900                                Changed))
4901       return Changed;
4902 
4903     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4904                             bool Stripped) -> bool {
4905       auto &AA = A.getAAFor<AAValueSimplify>(
4906           *this, IRPosition::value(V, getCallBaseContext()),
4907           DepClassTy::REQUIRED);
4908       if (!Stripped && this == &AA) {
4909         // TODO: Look the instruction and check recursively.
4910 
4911         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4912                           << "\n");
4913         return false;
4914       }
4915       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4916     };
4917 
4918     bool Dummy = false;
4919     if (!genericValueTraversal<AAValueSimplify, bool>(
4920             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4921             /* UseValueSimplify */ false))
4922       if (!askSimplifiedValueForOtherAAs(A))
4923         return indicatePessimisticFixpoint();
4924 
4925     // If a candicate was found in this update, return CHANGED.
4926     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
4927                                                : ChangeStatus ::CHANGED;
4928   }
4929 
4930   /// See AbstractAttribute::trackStatistics()
4931   void trackStatistics() const override {
4932     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4933   }
4934 };
4935 
4936 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4937   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4938       : AAValueSimplifyImpl(IRP, A) {}
4939 
4940   /// See AbstractAttribute::initialize(...).
4941   void initialize(Attributor &A) override {
4942     SimplifiedAssociatedValue = &getAnchorValue();
4943     indicateOptimisticFixpoint();
4944   }
4945   /// See AbstractAttribute::initialize(...).
4946   ChangeStatus updateImpl(Attributor &A) override {
4947     llvm_unreachable(
4948         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4949   }
4950   /// See AbstractAttribute::trackStatistics()
4951   void trackStatistics() const override {
4952     STATS_DECLTRACK_FN_ATTR(value_simplify)
4953   }
4954 };
4955 
4956 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4957   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4958       : AAValueSimplifyFunction(IRP, A) {}
4959   /// See AbstractAttribute::trackStatistics()
4960   void trackStatistics() const override {
4961     STATS_DECLTRACK_CS_ATTR(value_simplify)
4962   }
4963 };
4964 
4965 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4966   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4967       : AAValueSimplifyReturned(IRP, A) {}
4968 
4969   /// See AbstractAttribute::manifest(...).
4970   ChangeStatus manifest(Attributor &A) override {
4971     return AAValueSimplifyImpl::manifest(A);
4972   }
4973 
4974   void trackStatistics() const override {
4975     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4976   }
4977 };
4978 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4979   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4980       : AAValueSimplifyFloating(IRP, A) {}
4981 
4982   /// See AbstractAttribute::manifest(...).
4983   ChangeStatus manifest(Attributor &A) override {
4984     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4985 
4986     if (SimplifiedAssociatedValue.hasValue() &&
4987         !SimplifiedAssociatedValue.getValue())
4988       return Changed;
4989 
4990     Value &V = getAssociatedValue();
4991     auto *C = SimplifiedAssociatedValue.hasValue()
4992                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4993                   : UndefValue::get(V.getType());
4994     if (C) {
4995       Use &U = cast<CallBase>(&getAnchorValue())
4996                    ->getArgOperandUse(getCallSiteArgNo());
4997       // We can replace the AssociatedValue with the constant.
4998       if (&V != C) {
4999         if (Value *NewV = AA::getWithType(*C, *V.getType()))
5000           if (A.changeUseAfterManifest(U, *NewV))
5001             Changed = ChangeStatus::CHANGED;
5002       }
5003     }
5004 
5005     return Changed | AAValueSimplify::manifest(A);
5006   }
5007 
5008   void trackStatistics() const override {
5009     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5010   }
5011 };
5012 
5013 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5014 struct AAHeapToStackImpl : public AAHeapToStack {
5015   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
5016       : AAHeapToStack(IRP, A) {}
5017 
5018   const std::string getAsStr() const override {
5019     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
5020   }
5021 
5022   bool isAssumedHeapToStack(CallBase &CB) const override {
5023     return isValidState() && MallocCalls.contains(&CB) &&
5024            !BadMallocCalls.count(&CB);
5025   }
5026 
5027   bool isKnownHeapToStack(CallBase &CB) const override {
5028     return isValidState() && MallocCalls.contains(&CB) &&
5029            !BadMallocCalls.count(&CB);
5030   }
5031 
5032   ChangeStatus manifest(Attributor &A) override {
5033     assert(getState().isValidState() &&
5034            "Attempted to manifest an invalid state!");
5035 
5036     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5037     Function *F = getAnchorScope();
5038     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5039 
5040     for (Instruction *MallocCall : MallocCalls) {
5041       // This malloc cannot be replaced.
5042       if (BadMallocCalls.count(MallocCall))
5043         continue;
5044 
5045       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5046         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5047         A.deleteAfterManifest(*FreeCall);
5048         HasChanged = ChangeStatus::CHANGED;
5049       }
5050 
5051       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5052                         << "\n");
5053 
5054       Align Alignment;
5055       Value *Size;
5056       if (isCallocLikeFn(MallocCall, TLI)) {
5057         auto *Num = MallocCall->getOperand(0);
5058         auto *SizeT = MallocCall->getOperand(1);
5059         IRBuilder<> B(MallocCall);
5060         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5061       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5062         Size = MallocCall->getOperand(1);
5063         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5064                                    ->getValue()
5065                                    .getZExtValue())
5066                         .valueOrOne();
5067       } else {
5068         Size = MallocCall->getOperand(0);
5069       }
5070 
5071       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5072       Instruction *AI =
5073           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5074                          "", MallocCall->getNextNode());
5075 
5076       if (AI->getType() != MallocCall->getType())
5077         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5078                              AI->getNextNode());
5079 
5080       A.changeValueAfterManifest(*MallocCall, *AI);
5081 
5082       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5083         auto *NBB = II->getNormalDest();
5084         BranchInst::Create(NBB, MallocCall->getParent());
5085         A.deleteAfterManifest(*MallocCall);
5086       } else {
5087         A.deleteAfterManifest(*MallocCall);
5088       }
5089 
5090       // Zero out the allocated memory if it was a calloc.
5091       if (isCallocLikeFn(MallocCall, TLI)) {
5092         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5093                                    AI->getNextNode());
5094         Value *Ops[] = {
5095             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5096             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5097 
5098         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5099         Module *M = F->getParent();
5100         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5101         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5102       }
5103       HasChanged = ChangeStatus::CHANGED;
5104     }
5105 
5106     return HasChanged;
5107   }
5108 
5109   /// Collection of all malloc calls in a function.
5110   SmallSetVector<Instruction *, 4> MallocCalls;
5111 
5112   /// Collection of malloc calls that cannot be converted.
5113   DenseSet<const Instruction *> BadMallocCalls;
5114 
5115   /// A map for each malloc call to the set of associated free calls.
5116   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5117 
5118   ChangeStatus updateImpl(Attributor &A) override;
5119 };
5120 
5121 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5122   const Function *F = getAnchorScope();
5123   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5124 
5125   MustBeExecutedContextExplorer &Explorer =
5126       A.getInfoCache().getMustBeExecutedContextExplorer();
5127 
5128   auto FreeCheck = [&](Instruction &I) {
5129     const auto &Frees = FreesForMalloc.lookup(&I);
5130     if (Frees.size() != 1)
5131       return false;
5132     Instruction *UniqueFree = *Frees.begin();
5133     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5134   };
5135 
5136   auto UsesCheck = [&](Instruction &I) {
5137     bool ValidUsesOnly = true;
5138     bool MustUse = true;
5139     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5140       Instruction *UserI = cast<Instruction>(U.getUser());
5141       if (isa<LoadInst>(UserI))
5142         return true;
5143       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5144         if (SI->getValueOperand() == U.get()) {
5145           LLVM_DEBUG(dbgs()
5146                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5147           ValidUsesOnly = false;
5148         } else {
5149           // A store into the malloc'ed memory is fine.
5150         }
5151         return true;
5152       }
5153       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5154         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5155           return true;
5156         // Record malloc.
5157         if (isFreeCall(UserI, TLI)) {
5158           if (MustUse) {
5159             FreesForMalloc[&I].insert(UserI);
5160           } else {
5161             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5162                               << *UserI << "\n");
5163             ValidUsesOnly = false;
5164           }
5165           return true;
5166         }
5167 
5168         unsigned ArgNo = CB->getArgOperandNo(&U);
5169 
5170         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5171             *this, IRPosition::callsite_argument(*CB, ArgNo),
5172             DepClassTy::REQUIRED);
5173 
5174         // If a callsite argument use is nofree, we are fine.
5175         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5176             *this, IRPosition::callsite_argument(*CB, ArgNo),
5177             DepClassTy::REQUIRED);
5178 
5179         if (!NoCaptureAA.isAssumedNoCapture() ||
5180             !ArgNoFreeAA.isAssumedNoFree()) {
5181           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5182           ValidUsesOnly = false;
5183         }
5184         return true;
5185       }
5186 
5187       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5188           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5189         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5190         Follow = true;
5191         return true;
5192       }
5193       // Unknown user for which we can not track uses further (in a way that
5194       // makes sense).
5195       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5196       ValidUsesOnly = false;
5197       return true;
5198     };
5199     A.checkForAllUses(Pred, *this, I);
5200     return ValidUsesOnly;
5201   };
5202 
5203   auto MallocCallocCheck = [&](Instruction &I) {
5204     if (BadMallocCalls.count(&I))
5205       return true;
5206 
5207     bool IsMalloc = isMallocLikeFn(&I, TLI);
5208     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5209     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5210     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5211       BadMallocCalls.insert(&I);
5212       return true;
5213     }
5214 
5215     if (IsMalloc) {
5216       if (MaxHeapToStackSize == -1) {
5217         if (UsesCheck(I) || FreeCheck(I)) {
5218           MallocCalls.insert(&I);
5219           return true;
5220         }
5221       }
5222       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5223         if (Size->getValue().ule(MaxHeapToStackSize))
5224           if (UsesCheck(I) || FreeCheck(I)) {
5225             MallocCalls.insert(&I);
5226             return true;
5227           }
5228     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5229       if (MaxHeapToStackSize == -1) {
5230         if (UsesCheck(I) || FreeCheck(I)) {
5231           MallocCalls.insert(&I);
5232           return true;
5233         }
5234       }
5235       // Only if the alignment and sizes are constant.
5236       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5237         if (Size->getValue().ule(MaxHeapToStackSize))
5238           if (UsesCheck(I) || FreeCheck(I)) {
5239             MallocCalls.insert(&I);
5240             return true;
5241           }
5242     } else if (IsCalloc) {
5243       if (MaxHeapToStackSize == -1) {
5244         if (UsesCheck(I) || FreeCheck(I)) {
5245           MallocCalls.insert(&I);
5246           return true;
5247         }
5248       }
5249       bool Overflow = false;
5250       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5251         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5252           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5253                   .ule(MaxHeapToStackSize))
5254             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5255               MallocCalls.insert(&I);
5256               return true;
5257             }
5258     }
5259 
5260     BadMallocCalls.insert(&I);
5261     return true;
5262   };
5263 
5264   size_t NumBadMallocs = BadMallocCalls.size();
5265 
5266   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5267 
5268   if (NumBadMallocs != BadMallocCalls.size())
5269     return ChangeStatus::CHANGED;
5270 
5271   return ChangeStatus::UNCHANGED;
5272 }
5273 
5274 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5275   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5276       : AAHeapToStackImpl(IRP, A) {}
5277 
5278   /// See AbstractAttribute::trackStatistics().
5279   void trackStatistics() const override {
5280     STATS_DECL(
5281         MallocCalls, Function,
5282         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5283     for (auto *C : MallocCalls)
5284       if (!BadMallocCalls.count(C))
5285         ++BUILD_STAT_NAME(MallocCalls, Function);
5286   }
5287 };
5288 
5289 /// ----------------------- Privatizable Pointers ------------------------------
5290 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5291   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5292       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5293 
5294   ChangeStatus indicatePessimisticFixpoint() override {
5295     AAPrivatizablePtr::indicatePessimisticFixpoint();
5296     PrivatizableType = nullptr;
5297     return ChangeStatus::CHANGED;
5298   }
5299 
5300   /// Identify the type we can chose for a private copy of the underlying
5301   /// argument. None means it is not clear yet, nullptr means there is none.
5302   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5303 
5304   /// Return a privatizable type that encloses both T0 and T1.
5305   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5306   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5307     if (!T0.hasValue())
5308       return T1;
5309     if (!T1.hasValue())
5310       return T0;
5311     if (T0 == T1)
5312       return T0;
5313     return nullptr;
5314   }
5315 
5316   Optional<Type *> getPrivatizableType() const override {
5317     return PrivatizableType;
5318   }
5319 
5320   const std::string getAsStr() const override {
5321     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5322   }
5323 
5324 protected:
5325   Optional<Type *> PrivatizableType;
5326 };
5327 
5328 // TODO: Do this for call site arguments (probably also other values) as well.
5329 
5330 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5331   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5332       : AAPrivatizablePtrImpl(IRP, A) {}
5333 
5334   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5335   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5336     // If this is a byval argument and we know all the call sites (so we can
5337     // rewrite them), there is no need to check them explicitly.
5338     bool AllCallSitesKnown;
5339     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5340         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5341                                true, AllCallSitesKnown))
5342       return getAssociatedValue().getType()->getPointerElementType();
5343 
5344     Optional<Type *> Ty;
5345     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5346 
5347     // Make sure the associated call site argument has the same type at all call
5348     // sites and it is an allocation we know is safe to privatize, for now that
5349     // means we only allow alloca instructions.
5350     // TODO: We can additionally analyze the accesses in the callee to  create
5351     //       the type from that information instead. That is a little more
5352     //       involved and will be done in a follow up patch.
5353     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5354       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5355       // Check if a coresponding argument was found or if it is one not
5356       // associated (which can happen for callback calls).
5357       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5358         return false;
5359 
5360       // Check that all call sites agree on a type.
5361       auto &PrivCSArgAA =
5362           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5363       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5364 
5365       LLVM_DEBUG({
5366         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5367         if (CSTy.hasValue() && CSTy.getValue())
5368           CSTy.getValue()->print(dbgs());
5369         else if (CSTy.hasValue())
5370           dbgs() << "<nullptr>";
5371         else
5372           dbgs() << "<none>";
5373       });
5374 
5375       Ty = combineTypes(Ty, CSTy);
5376 
5377       LLVM_DEBUG({
5378         dbgs() << " : New Type: ";
5379         if (Ty.hasValue() && Ty.getValue())
5380           Ty.getValue()->print(dbgs());
5381         else if (Ty.hasValue())
5382           dbgs() << "<nullptr>";
5383         else
5384           dbgs() << "<none>";
5385         dbgs() << "\n";
5386       });
5387 
5388       return !Ty.hasValue() || Ty.getValue();
5389     };
5390 
5391     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5392       return nullptr;
5393     return Ty;
5394   }
5395 
5396   /// See AbstractAttribute::updateImpl(...).
5397   ChangeStatus updateImpl(Attributor &A) override {
5398     PrivatizableType = identifyPrivatizableType(A);
5399     if (!PrivatizableType.hasValue())
5400       return ChangeStatus::UNCHANGED;
5401     if (!PrivatizableType.getValue())
5402       return indicatePessimisticFixpoint();
5403 
5404     // The dependence is optional so we don't give up once we give up on the
5405     // alignment.
5406     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5407                         DepClassTy::OPTIONAL);
5408 
5409     // Avoid arguments with padding for now.
5410     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5411         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5412                                                 A.getInfoCache().getDL())) {
5413       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5414       return indicatePessimisticFixpoint();
5415     }
5416 
5417     // Verify callee and caller agree on how the promoted argument would be
5418     // passed.
5419     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5420     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5421     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5422     Function &Fn = *getIRPosition().getAnchorScope();
5423     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5424     ArgsToPromote.insert(getAssociatedArgument());
5425     const auto *TTI =
5426         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5427     if (!TTI ||
5428         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5429             Fn, *TTI, ArgsToPromote, Dummy) ||
5430         ArgsToPromote.empty()) {
5431       LLVM_DEBUG(
5432           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5433                  << Fn.getName() << "\n");
5434       return indicatePessimisticFixpoint();
5435     }
5436 
5437     // Collect the types that will replace the privatizable type in the function
5438     // signature.
5439     SmallVector<Type *, 16> ReplacementTypes;
5440     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5441 
5442     // Register a rewrite of the argument.
5443     Argument *Arg = getAssociatedArgument();
5444     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5445       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5446       return indicatePessimisticFixpoint();
5447     }
5448 
5449     unsigned ArgNo = Arg->getArgNo();
5450 
5451     // Helper to check if for the given call site the associated argument is
5452     // passed to a callback where the privatization would be different.
5453     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5454       SmallVector<const Use *, 4> CallbackUses;
5455       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5456       for (const Use *U : CallbackUses) {
5457         AbstractCallSite CBACS(U);
5458         assert(CBACS && CBACS.isCallbackCall());
5459         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5460           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5461 
5462           LLVM_DEBUG({
5463             dbgs()
5464                 << "[AAPrivatizablePtr] Argument " << *Arg
5465                 << "check if can be privatized in the context of its parent ("
5466                 << Arg->getParent()->getName()
5467                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5468                    "callback ("
5469                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5470                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5471                 << CBACS.getCallArgOperand(CBArg) << " vs "
5472                 << CB.getArgOperand(ArgNo) << "\n"
5473                 << "[AAPrivatizablePtr] " << CBArg << " : "
5474                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5475           });
5476 
5477           if (CBArgNo != int(ArgNo))
5478             continue;
5479           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5480               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5481           if (CBArgPrivAA.isValidState()) {
5482             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5483             if (!CBArgPrivTy.hasValue())
5484               continue;
5485             if (CBArgPrivTy.getValue() == PrivatizableType)
5486               continue;
5487           }
5488 
5489           LLVM_DEBUG({
5490             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5491                    << " cannot be privatized in the context of its parent ("
5492                    << Arg->getParent()->getName()
5493                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5494                       "callback ("
5495                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5496                    << ").\n[AAPrivatizablePtr] for which the argument "
5497                       "privatization is not compatible.\n";
5498           });
5499           return false;
5500         }
5501       }
5502       return true;
5503     };
5504 
5505     // Helper to check if for the given call site the associated argument is
5506     // passed to a direct call where the privatization would be different.
5507     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5508       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5509       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5510       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5511              "Expected a direct call operand for callback call operand");
5512 
5513       LLVM_DEBUG({
5514         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5515                << " check if be privatized in the context of its parent ("
5516                << Arg->getParent()->getName()
5517                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5518                   "direct call of ("
5519                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5520                << ").\n";
5521       });
5522 
5523       Function *DCCallee = DC->getCalledFunction();
5524       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5525         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5526             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5527             DepClassTy::REQUIRED);
5528         if (DCArgPrivAA.isValidState()) {
5529           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5530           if (!DCArgPrivTy.hasValue())
5531             return true;
5532           if (DCArgPrivTy.getValue() == PrivatizableType)
5533             return true;
5534         }
5535       }
5536 
5537       LLVM_DEBUG({
5538         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5539                << " cannot be privatized in the context of its parent ("
5540                << Arg->getParent()->getName()
5541                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5542                   "direct call of ("
5543                << ACS.getInstruction()->getCalledFunction()->getName()
5544                << ").\n[AAPrivatizablePtr] for which the argument "
5545                   "privatization is not compatible.\n";
5546       });
5547       return false;
5548     };
5549 
5550     // Helper to check if the associated argument is used at the given abstract
5551     // call site in a way that is incompatible with the privatization assumed
5552     // here.
5553     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5554       if (ACS.isDirectCall())
5555         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5556       if (ACS.isCallbackCall())
5557         return IsCompatiblePrivArgOfDirectCS(ACS);
5558       return false;
5559     };
5560 
5561     bool AllCallSitesKnown;
5562     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5563                                 AllCallSitesKnown))
5564       return indicatePessimisticFixpoint();
5565 
5566     return ChangeStatus::UNCHANGED;
5567   }
5568 
5569   /// Given a type to private \p PrivType, collect the constituates (which are
5570   /// used) in \p ReplacementTypes.
5571   static void
5572   identifyReplacementTypes(Type *PrivType,
5573                            SmallVectorImpl<Type *> &ReplacementTypes) {
5574     // TODO: For now we expand the privatization type to the fullest which can
5575     //       lead to dead arguments that need to be removed later.
5576     assert(PrivType && "Expected privatizable type!");
5577 
5578     // Traverse the type, extract constituate types on the outermost level.
5579     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5580       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5581         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5582     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5583       ReplacementTypes.append(PrivArrayType->getNumElements(),
5584                               PrivArrayType->getElementType());
5585     } else {
5586       ReplacementTypes.push_back(PrivType);
5587     }
5588   }
5589 
5590   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5591   /// The values needed are taken from the arguments of \p F starting at
5592   /// position \p ArgNo.
5593   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5594                                    unsigned ArgNo, Instruction &IP) {
5595     assert(PrivType && "Expected privatizable type!");
5596 
5597     IRBuilder<NoFolder> IRB(&IP);
5598     const DataLayout &DL = F.getParent()->getDataLayout();
5599 
5600     // Traverse the type, build GEPs and stores.
5601     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5602       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5603       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5604         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5605         Value *Ptr =
5606             constructPointer(PointeeTy, PrivType, &Base,
5607                              PrivStructLayout->getElementOffset(u), IRB, DL);
5608         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5609       }
5610     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5611       Type *PointeeTy = PrivArrayType->getElementType();
5612       Type *PointeePtrTy = PointeeTy->getPointerTo();
5613       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5614       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5615         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
5616                                       u * PointeeTySize, IRB, DL);
5617         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5618       }
5619     } else {
5620       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5621     }
5622   }
5623 
5624   /// Extract values from \p Base according to the type \p PrivType at the
5625   /// call position \p ACS. The values are appended to \p ReplacementValues.
5626   void createReplacementValues(Align Alignment, Type *PrivType,
5627                                AbstractCallSite ACS, Value *Base,
5628                                SmallVectorImpl<Value *> &ReplacementValues) {
5629     assert(Base && "Expected base value!");
5630     assert(PrivType && "Expected privatizable type!");
5631     Instruction *IP = ACS.getInstruction();
5632 
5633     IRBuilder<NoFolder> IRB(IP);
5634     const DataLayout &DL = IP->getModule()->getDataLayout();
5635 
5636     if (Base->getType()->getPointerElementType() != PrivType)
5637       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5638                                                  "", ACS.getInstruction());
5639 
5640     // Traverse the type, build GEPs and loads.
5641     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5642       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5643       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5644         Type *PointeeTy = PrivStructType->getElementType(u);
5645         Value *Ptr =
5646             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
5647                              PrivStructLayout->getElementOffset(u), IRB, DL);
5648         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5649         L->setAlignment(Alignment);
5650         ReplacementValues.push_back(L);
5651       }
5652     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5653       Type *PointeeTy = PrivArrayType->getElementType();
5654       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5655       Type *PointeePtrTy = PointeeTy->getPointerTo();
5656       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5657         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
5658                                       u * PointeeTySize, IRB, DL);
5659         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5660         L->setAlignment(Alignment);
5661         ReplacementValues.push_back(L);
5662       }
5663     } else {
5664       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5665       L->setAlignment(Alignment);
5666       ReplacementValues.push_back(L);
5667     }
5668   }
5669 
5670   /// See AbstractAttribute::manifest(...)
5671   ChangeStatus manifest(Attributor &A) override {
5672     if (!PrivatizableType.hasValue())
5673       return ChangeStatus::UNCHANGED;
5674     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5675 
5676     // Collect all tail calls in the function as we cannot allow new allocas to
5677     // escape into tail recursion.
5678     // TODO: Be smarter about new allocas escaping into tail calls.
5679     SmallVector<CallInst *, 16> TailCalls;
5680     if (!A.checkForAllInstructions(
5681             [&](Instruction &I) {
5682               CallInst &CI = cast<CallInst>(I);
5683               if (CI.isTailCall())
5684                 TailCalls.push_back(&CI);
5685               return true;
5686             },
5687             *this, {Instruction::Call}))
5688       return ChangeStatus::UNCHANGED;
5689 
5690     Argument *Arg = getAssociatedArgument();
5691     // Query AAAlign attribute for alignment of associated argument to
5692     // determine the best alignment of loads.
5693     const auto &AlignAA =
5694         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5695 
5696     // Callback to repair the associated function. A new alloca is placed at the
5697     // beginning and initialized with the values passed through arguments. The
5698     // new alloca replaces the use of the old pointer argument.
5699     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5700         [=](const Attributor::ArgumentReplacementInfo &ARI,
5701             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5702           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5703           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5704           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5705                                            Arg->getName() + ".priv", IP);
5706           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5707                                ArgIt->getArgNo(), *IP);
5708 
5709           if (AI->getType() != Arg->getType())
5710             AI =
5711                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5712           Arg->replaceAllUsesWith(AI);
5713 
5714           for (CallInst *CI : TailCalls)
5715             CI->setTailCall(false);
5716         };
5717 
5718     // Callback to repair a call site of the associated function. The elements
5719     // of the privatizable type are loaded prior to the call and passed to the
5720     // new function version.
5721     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5722         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5723                       AbstractCallSite ACS,
5724                       SmallVectorImpl<Value *> &NewArgOperands) {
5725           // When no alignment is specified for the load instruction,
5726           // natural alignment is assumed.
5727           createReplacementValues(
5728               assumeAligned(AlignAA.getAssumedAlign()),
5729               PrivatizableType.getValue(), ACS,
5730               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5731               NewArgOperands);
5732         };
5733 
5734     // Collect the types that will replace the privatizable type in the function
5735     // signature.
5736     SmallVector<Type *, 16> ReplacementTypes;
5737     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5738 
5739     // Register a rewrite of the argument.
5740     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5741                                            std::move(FnRepairCB),
5742                                            std::move(ACSRepairCB)))
5743       return ChangeStatus::CHANGED;
5744     return ChangeStatus::UNCHANGED;
5745   }
5746 
5747   /// See AbstractAttribute::trackStatistics()
5748   void trackStatistics() const override {
5749     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5750   }
5751 };
5752 
5753 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5754   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5755       : AAPrivatizablePtrImpl(IRP, A) {}
5756 
5757   /// See AbstractAttribute::initialize(...).
5758   virtual void initialize(Attributor &A) override {
5759     // TODO: We can privatize more than arguments.
5760     indicatePessimisticFixpoint();
5761   }
5762 
5763   ChangeStatus updateImpl(Attributor &A) override {
5764     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5765                      "updateImpl will not be called");
5766   }
5767 
5768   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5769   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5770     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5771     if (!Obj) {
5772       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5773       return nullptr;
5774     }
5775 
5776     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5777       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5778         if (CI->isOne())
5779           return Obj->getType()->getPointerElementType();
5780     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5781       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5782           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5783       if (PrivArgAA.isAssumedPrivatizablePtr())
5784         return Obj->getType()->getPointerElementType();
5785     }
5786 
5787     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5788                          "alloca nor privatizable argument: "
5789                       << *Obj << "!\n");
5790     return nullptr;
5791   }
5792 
5793   /// See AbstractAttribute::trackStatistics()
5794   void trackStatistics() const override {
5795     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5796   }
5797 };
5798 
5799 struct AAPrivatizablePtrCallSiteArgument final
5800     : public AAPrivatizablePtrFloating {
5801   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5802       : AAPrivatizablePtrFloating(IRP, A) {}
5803 
5804   /// See AbstractAttribute::initialize(...).
5805   void initialize(Attributor &A) override {
5806     if (getIRPosition().hasAttr(Attribute::ByVal))
5807       indicateOptimisticFixpoint();
5808   }
5809 
5810   /// See AbstractAttribute::updateImpl(...).
5811   ChangeStatus updateImpl(Attributor &A) override {
5812     PrivatizableType = identifyPrivatizableType(A);
5813     if (!PrivatizableType.hasValue())
5814       return ChangeStatus::UNCHANGED;
5815     if (!PrivatizableType.getValue())
5816       return indicatePessimisticFixpoint();
5817 
5818     const IRPosition &IRP = getIRPosition();
5819     auto &NoCaptureAA =
5820         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5821     if (!NoCaptureAA.isAssumedNoCapture()) {
5822       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5823       return indicatePessimisticFixpoint();
5824     }
5825 
5826     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5827     if (!NoAliasAA.isAssumedNoAlias()) {
5828       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5829       return indicatePessimisticFixpoint();
5830     }
5831 
5832     const auto &MemBehaviorAA =
5833         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5834     if (!MemBehaviorAA.isAssumedReadOnly()) {
5835       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5836       return indicatePessimisticFixpoint();
5837     }
5838 
5839     return ChangeStatus::UNCHANGED;
5840   }
5841 
5842   /// See AbstractAttribute::trackStatistics()
5843   void trackStatistics() const override {
5844     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5845   }
5846 };
5847 
5848 struct AAPrivatizablePtrCallSiteReturned final
5849     : public AAPrivatizablePtrFloating {
5850   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5851       : AAPrivatizablePtrFloating(IRP, A) {}
5852 
5853   /// See AbstractAttribute::initialize(...).
5854   void initialize(Attributor &A) override {
5855     // TODO: We can privatize more than arguments.
5856     indicatePessimisticFixpoint();
5857   }
5858 
5859   /// See AbstractAttribute::trackStatistics()
5860   void trackStatistics() const override {
5861     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5862   }
5863 };
5864 
5865 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5866   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5867       : AAPrivatizablePtrFloating(IRP, A) {}
5868 
5869   /// See AbstractAttribute::initialize(...).
5870   void initialize(Attributor &A) override {
5871     // TODO: We can privatize more than arguments.
5872     indicatePessimisticFixpoint();
5873   }
5874 
5875   /// See AbstractAttribute::trackStatistics()
5876   void trackStatistics() const override {
5877     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5878   }
5879 };
5880 
5881 /// -------------------- Memory Behavior Attributes ----------------------------
5882 /// Includes read-none, read-only, and write-only.
5883 /// ----------------------------------------------------------------------------
5884 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5885   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5886       : AAMemoryBehavior(IRP, A) {}
5887 
5888   /// See AbstractAttribute::initialize(...).
5889   void initialize(Attributor &A) override {
5890     intersectAssumedBits(BEST_STATE);
5891     getKnownStateFromValue(getIRPosition(), getState());
5892     AAMemoryBehavior::initialize(A);
5893   }
5894 
5895   /// Return the memory behavior information encoded in the IR for \p IRP.
5896   static void getKnownStateFromValue(const IRPosition &IRP,
5897                                      BitIntegerState &State,
5898                                      bool IgnoreSubsumingPositions = false) {
5899     SmallVector<Attribute, 2> Attrs;
5900     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5901     for (const Attribute &Attr : Attrs) {
5902       switch (Attr.getKindAsEnum()) {
5903       case Attribute::ReadNone:
5904         State.addKnownBits(NO_ACCESSES);
5905         break;
5906       case Attribute::ReadOnly:
5907         State.addKnownBits(NO_WRITES);
5908         break;
5909       case Attribute::WriteOnly:
5910         State.addKnownBits(NO_READS);
5911         break;
5912       default:
5913         llvm_unreachable("Unexpected attribute!");
5914       }
5915     }
5916 
5917     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5918       if (!I->mayReadFromMemory())
5919         State.addKnownBits(NO_READS);
5920       if (!I->mayWriteToMemory())
5921         State.addKnownBits(NO_WRITES);
5922     }
5923   }
5924 
5925   /// See AbstractAttribute::getDeducedAttributes(...).
5926   void getDeducedAttributes(LLVMContext &Ctx,
5927                             SmallVectorImpl<Attribute> &Attrs) const override {
5928     assert(Attrs.size() == 0);
5929     if (isAssumedReadNone())
5930       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5931     else if (isAssumedReadOnly())
5932       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5933     else if (isAssumedWriteOnly())
5934       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5935     assert(Attrs.size() <= 1);
5936   }
5937 
5938   /// See AbstractAttribute::manifest(...).
5939   ChangeStatus manifest(Attributor &A) override {
5940     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5941       return ChangeStatus::UNCHANGED;
5942 
5943     const IRPosition &IRP = getIRPosition();
5944 
5945     // Check if we would improve the existing attributes first.
5946     SmallVector<Attribute, 4> DeducedAttrs;
5947     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5948     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5949           return IRP.hasAttr(Attr.getKindAsEnum(),
5950                              /* IgnoreSubsumingPositions */ true);
5951         }))
5952       return ChangeStatus::UNCHANGED;
5953 
5954     // Clear existing attributes.
5955     IRP.removeAttrs(AttrKinds);
5956 
5957     // Use the generic manifest method.
5958     return IRAttribute::manifest(A);
5959   }
5960 
5961   /// See AbstractState::getAsStr().
5962   const std::string getAsStr() const override {
5963     if (isAssumedReadNone())
5964       return "readnone";
5965     if (isAssumedReadOnly())
5966       return "readonly";
5967     if (isAssumedWriteOnly())
5968       return "writeonly";
5969     return "may-read/write";
5970   }
5971 
5972   /// The set of IR attributes AAMemoryBehavior deals with.
5973   static const Attribute::AttrKind AttrKinds[3];
5974 };
5975 
5976 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5977     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5978 
5979 /// Memory behavior attribute for a floating value.
5980 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5981   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5982       : AAMemoryBehaviorImpl(IRP, A) {}
5983 
5984   /// See AbstractAttribute::initialize(...).
5985   void initialize(Attributor &A) override {
5986     AAMemoryBehaviorImpl::initialize(A);
5987     addUsesOf(A, getAssociatedValue());
5988   }
5989 
5990   /// See AbstractAttribute::updateImpl(...).
5991   ChangeStatus updateImpl(Attributor &A) override;
5992 
5993   /// See AbstractAttribute::trackStatistics()
5994   void trackStatistics() const override {
5995     if (isAssumedReadNone())
5996       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5997     else if (isAssumedReadOnly())
5998       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5999     else if (isAssumedWriteOnly())
6000       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6001   }
6002 
6003 private:
6004   /// Return true if users of \p UserI might access the underlying
6005   /// variable/location described by \p U and should therefore be analyzed.
6006   bool followUsersOfUseIn(Attributor &A, const Use *U,
6007                           const Instruction *UserI);
6008 
6009   /// Update the state according to the effect of use \p U in \p UserI.
6010   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
6011 
6012 protected:
6013   /// Add the uses of \p V to the `Uses` set we look at during the update step.
6014   void addUsesOf(Attributor &A, const Value &V);
6015 
6016   /// Container for (transitive) uses of the associated argument.
6017   SmallVector<const Use *, 8> Uses;
6018 
6019   /// Set to remember the uses we already traversed.
6020   SmallPtrSet<const Use *, 8> Visited;
6021 };
6022 
6023 /// Memory behavior attribute for function argument.
6024 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6025   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6026       : AAMemoryBehaviorFloating(IRP, A) {}
6027 
6028   /// See AbstractAttribute::initialize(...).
6029   void initialize(Attributor &A) override {
6030     intersectAssumedBits(BEST_STATE);
6031     const IRPosition &IRP = getIRPosition();
6032     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6033     // can query it when we use has/getAttr. That would allow us to reuse the
6034     // initialize of the base class here.
6035     bool HasByVal =
6036         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6037     getKnownStateFromValue(IRP, getState(),
6038                            /* IgnoreSubsumingPositions */ HasByVal);
6039 
6040     // Initialize the use vector with all direct uses of the associated value.
6041     Argument *Arg = getAssociatedArgument();
6042     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
6043       indicatePessimisticFixpoint();
6044     } else {
6045       addUsesOf(A, *Arg);
6046     }
6047   }
6048 
6049   ChangeStatus manifest(Attributor &A) override {
6050     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6051     if (!getAssociatedValue().getType()->isPointerTy())
6052       return ChangeStatus::UNCHANGED;
6053 
6054     // TODO: From readattrs.ll: "inalloca parameters are always
6055     //                           considered written"
6056     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6057       removeKnownBits(NO_WRITES);
6058       removeAssumedBits(NO_WRITES);
6059     }
6060     return AAMemoryBehaviorFloating::manifest(A);
6061   }
6062 
6063   /// See AbstractAttribute::trackStatistics()
6064   void trackStatistics() const override {
6065     if (isAssumedReadNone())
6066       STATS_DECLTRACK_ARG_ATTR(readnone)
6067     else if (isAssumedReadOnly())
6068       STATS_DECLTRACK_ARG_ATTR(readonly)
6069     else if (isAssumedWriteOnly())
6070       STATS_DECLTRACK_ARG_ATTR(writeonly)
6071   }
6072 };
6073 
6074 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6075   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6076       : AAMemoryBehaviorArgument(IRP, A) {}
6077 
6078   /// See AbstractAttribute::initialize(...).
6079   void initialize(Attributor &A) override {
6080     // If we don't have an associated attribute this is either a variadic call
6081     // or an indirect call, either way, nothing to do here.
6082     Argument *Arg = getAssociatedArgument();
6083     if (!Arg) {
6084       indicatePessimisticFixpoint();
6085       return;
6086     }
6087     if (Arg->hasByValAttr()) {
6088       addKnownBits(NO_WRITES);
6089       removeKnownBits(NO_READS);
6090       removeAssumedBits(NO_READS);
6091     }
6092     AAMemoryBehaviorArgument::initialize(A);
6093     if (getAssociatedFunction()->isDeclaration())
6094       indicatePessimisticFixpoint();
6095   }
6096 
6097   /// See AbstractAttribute::updateImpl(...).
6098   ChangeStatus updateImpl(Attributor &A) override {
6099     // TODO: Once we have call site specific value information we can provide
6100     //       call site specific liveness liveness information and then it makes
6101     //       sense to specialize attributes for call sites arguments instead of
6102     //       redirecting requests to the callee argument.
6103     Argument *Arg = getAssociatedArgument();
6104     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6105     auto &ArgAA =
6106         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6107     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6108   }
6109 
6110   /// See AbstractAttribute::trackStatistics()
6111   void trackStatistics() const override {
6112     if (isAssumedReadNone())
6113       STATS_DECLTRACK_CSARG_ATTR(readnone)
6114     else if (isAssumedReadOnly())
6115       STATS_DECLTRACK_CSARG_ATTR(readonly)
6116     else if (isAssumedWriteOnly())
6117       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6118   }
6119 };
6120 
6121 /// Memory behavior attribute for a call site return position.
6122 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6123   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6124       : AAMemoryBehaviorFloating(IRP, A) {}
6125 
6126   /// See AbstractAttribute::initialize(...).
6127   void initialize(Attributor &A) override {
6128     AAMemoryBehaviorImpl::initialize(A);
6129     Function *F = getAssociatedFunction();
6130     if (!F || F->isDeclaration())
6131       indicatePessimisticFixpoint();
6132   }
6133 
6134   /// See AbstractAttribute::manifest(...).
6135   ChangeStatus manifest(Attributor &A) override {
6136     // We do not annotate returned values.
6137     return ChangeStatus::UNCHANGED;
6138   }
6139 
6140   /// See AbstractAttribute::trackStatistics()
6141   void trackStatistics() const override {}
6142 };
6143 
6144 /// An AA to represent the memory behavior function attributes.
6145 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6146   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6147       : AAMemoryBehaviorImpl(IRP, A) {}
6148 
6149   /// See AbstractAttribute::updateImpl(Attributor &A).
6150   virtual ChangeStatus updateImpl(Attributor &A) override;
6151 
6152   /// See AbstractAttribute::manifest(...).
6153   ChangeStatus manifest(Attributor &A) override {
6154     Function &F = cast<Function>(getAnchorValue());
6155     if (isAssumedReadNone()) {
6156       F.removeFnAttr(Attribute::ArgMemOnly);
6157       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6158       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6159     }
6160     return AAMemoryBehaviorImpl::manifest(A);
6161   }
6162 
6163   /// See AbstractAttribute::trackStatistics()
6164   void trackStatistics() const override {
6165     if (isAssumedReadNone())
6166       STATS_DECLTRACK_FN_ATTR(readnone)
6167     else if (isAssumedReadOnly())
6168       STATS_DECLTRACK_FN_ATTR(readonly)
6169     else if (isAssumedWriteOnly())
6170       STATS_DECLTRACK_FN_ATTR(writeonly)
6171   }
6172 };
6173 
6174 /// AAMemoryBehavior attribute for call sites.
6175 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6176   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6177       : AAMemoryBehaviorImpl(IRP, A) {}
6178 
6179   /// See AbstractAttribute::initialize(...).
6180   void initialize(Attributor &A) override {
6181     AAMemoryBehaviorImpl::initialize(A);
6182     Function *F = getAssociatedFunction();
6183     if (!F || F->isDeclaration())
6184       indicatePessimisticFixpoint();
6185   }
6186 
6187   /// See AbstractAttribute::updateImpl(...).
6188   ChangeStatus updateImpl(Attributor &A) override {
6189     // TODO: Once we have call site specific value information we can provide
6190     //       call site specific liveness liveness information and then it makes
6191     //       sense to specialize attributes for call sites arguments instead of
6192     //       redirecting requests to the callee argument.
6193     Function *F = getAssociatedFunction();
6194     const IRPosition &FnPos = IRPosition::function(*F);
6195     auto &FnAA =
6196         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6197     return clampStateAndIndicateChange(getState(), FnAA.getState());
6198   }
6199 
6200   /// See AbstractAttribute::trackStatistics()
6201   void trackStatistics() const override {
6202     if (isAssumedReadNone())
6203       STATS_DECLTRACK_CS_ATTR(readnone)
6204     else if (isAssumedReadOnly())
6205       STATS_DECLTRACK_CS_ATTR(readonly)
6206     else if (isAssumedWriteOnly())
6207       STATS_DECLTRACK_CS_ATTR(writeonly)
6208   }
6209 };
6210 
6211 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6212 
6213   // The current assumed state used to determine a change.
6214   auto AssumedState = getAssumed();
6215 
6216   auto CheckRWInst = [&](Instruction &I) {
6217     // If the instruction has an own memory behavior state, use it to restrict
6218     // the local state. No further analysis is required as the other memory
6219     // state is as optimistic as it gets.
6220     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6221       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6222           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6223       intersectAssumedBits(MemBehaviorAA.getAssumed());
6224       return !isAtFixpoint();
6225     }
6226 
6227     // Remove access kind modifiers if necessary.
6228     if (I.mayReadFromMemory())
6229       removeAssumedBits(NO_READS);
6230     if (I.mayWriteToMemory())
6231       removeAssumedBits(NO_WRITES);
6232     return !isAtFixpoint();
6233   };
6234 
6235   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6236     return indicatePessimisticFixpoint();
6237 
6238   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6239                                         : ChangeStatus::UNCHANGED;
6240 }
6241 
6242 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6243 
6244   const IRPosition &IRP = getIRPosition();
6245   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6246   AAMemoryBehavior::StateType &S = getState();
6247 
6248   // First, check the function scope. We take the known information and we avoid
6249   // work if the assumed information implies the current assumed information for
6250   // this attribute. This is a valid for all but byval arguments.
6251   Argument *Arg = IRP.getAssociatedArgument();
6252   AAMemoryBehavior::base_t FnMemAssumedState =
6253       AAMemoryBehavior::StateType::getWorstState();
6254   if (!Arg || !Arg->hasByValAttr()) {
6255     const auto &FnMemAA =
6256         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6257     FnMemAssumedState = FnMemAA.getAssumed();
6258     S.addKnownBits(FnMemAA.getKnown());
6259     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6260       return ChangeStatus::UNCHANGED;
6261   }
6262 
6263   // Make sure the value is not captured (except through "return"), if
6264   // it is, any information derived would be irrelevant anyway as we cannot
6265   // check the potential aliases introduced by the capture. However, no need
6266   // to fall back to anythign less optimistic than the function state.
6267   const auto &ArgNoCaptureAA =
6268       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6269   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6270     S.intersectAssumedBits(FnMemAssumedState);
6271     return ChangeStatus::CHANGED;
6272   }
6273 
6274   // The current assumed state used to determine a change.
6275   auto AssumedState = S.getAssumed();
6276 
6277   // Liveness information to exclude dead users.
6278   // TODO: Take the FnPos once we have call site specific liveness information.
6279   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6280       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6281       DepClassTy::NONE);
6282 
6283   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6284   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6285     const Use *U = Uses[i];
6286     Instruction *UserI = cast<Instruction>(U->getUser());
6287     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6288                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6289                       << "]\n");
6290     if (A.isAssumedDead(*U, this, &LivenessAA))
6291       continue;
6292 
6293     // Droppable users, e.g., llvm::assume does not actually perform any action.
6294     if (UserI->isDroppable())
6295       continue;
6296 
6297     // Check if the users of UserI should also be visited.
6298     if (followUsersOfUseIn(A, U, UserI))
6299       addUsesOf(A, *UserI);
6300 
6301     // If UserI might touch memory we analyze the use in detail.
6302     if (UserI->mayReadOrWriteMemory())
6303       analyzeUseIn(A, U, UserI);
6304   }
6305 
6306   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6307                                         : ChangeStatus::UNCHANGED;
6308 }
6309 
6310 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6311   SmallVector<const Use *, 8> WL;
6312   for (const Use &U : V.uses())
6313     WL.push_back(&U);
6314 
6315   while (!WL.empty()) {
6316     const Use *U = WL.pop_back_val();
6317     if (!Visited.insert(U).second)
6318       continue;
6319 
6320     const Instruction *UserI = cast<Instruction>(U->getUser());
6321     if (UserI->mayReadOrWriteMemory()) {
6322       Uses.push_back(U);
6323       continue;
6324     }
6325     if (!followUsersOfUseIn(A, U, UserI))
6326       continue;
6327     for (const Use &UU : UserI->uses())
6328       WL.push_back(&UU);
6329   }
6330 }
6331 
6332 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6333                                                   const Instruction *UserI) {
6334   // The loaded value is unrelated to the pointer argument, no need to
6335   // follow the users of the load.
6336   if (isa<LoadInst>(UserI))
6337     return false;
6338 
6339   // By default we follow all uses assuming UserI might leak information on U,
6340   // we have special handling for call sites operands though.
6341   const auto *CB = dyn_cast<CallBase>(UserI);
6342   if (!CB || !CB->isArgOperand(U))
6343     return true;
6344 
6345   // If the use is a call argument known not to be captured, the users of
6346   // the call do not need to be visited because they have to be unrelated to
6347   // the input. Note that this check is not trivial even though we disallow
6348   // general capturing of the underlying argument. The reason is that the
6349   // call might the argument "through return", which we allow and for which we
6350   // need to check call users.
6351   if (U->get()->getType()->isPointerTy()) {
6352     unsigned ArgNo = CB->getArgOperandNo(U);
6353     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6354         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6355     return !ArgNoCaptureAA.isAssumedNoCapture();
6356   }
6357 
6358   return true;
6359 }
6360 
6361 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6362                                             const Instruction *UserI) {
6363   assert(UserI->mayReadOrWriteMemory());
6364 
6365   switch (UserI->getOpcode()) {
6366   default:
6367     // TODO: Handle all atomics and other side-effect operations we know of.
6368     break;
6369   case Instruction::Load:
6370     // Loads cause the NO_READS property to disappear.
6371     removeAssumedBits(NO_READS);
6372     return;
6373 
6374   case Instruction::Store:
6375     // Stores cause the NO_WRITES property to disappear if the use is the
6376     // pointer operand. Note that we do assume that capturing was taken care of
6377     // somewhere else.
6378     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6379       removeAssumedBits(NO_WRITES);
6380     return;
6381 
6382   case Instruction::Call:
6383   case Instruction::CallBr:
6384   case Instruction::Invoke: {
6385     // For call sites we look at the argument memory behavior attribute (this
6386     // could be recursive!) in order to restrict our own state.
6387     const auto *CB = cast<CallBase>(UserI);
6388 
6389     // Give up on operand bundles.
6390     if (CB->isBundleOperand(U)) {
6391       indicatePessimisticFixpoint();
6392       return;
6393     }
6394 
6395     // Calling a function does read the function pointer, maybe write it if the
6396     // function is self-modifying.
6397     if (CB->isCallee(U)) {
6398       removeAssumedBits(NO_READS);
6399       break;
6400     }
6401 
6402     // Adjust the possible access behavior based on the information on the
6403     // argument.
6404     IRPosition Pos;
6405     if (U->get()->getType()->isPointerTy())
6406       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6407     else
6408       Pos = IRPosition::callsite_function(*CB);
6409     const auto &MemBehaviorAA =
6410         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6411     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6412     // and at least "known".
6413     intersectAssumedBits(MemBehaviorAA.getAssumed());
6414     return;
6415   }
6416   };
6417 
6418   // Generally, look at the "may-properties" and adjust the assumed state if we
6419   // did not trigger special handling before.
6420   if (UserI->mayReadFromMemory())
6421     removeAssumedBits(NO_READS);
6422   if (UserI->mayWriteToMemory())
6423     removeAssumedBits(NO_WRITES);
6424 }
6425 
6426 } // namespace
6427 
6428 /// -------------------- Memory Locations Attributes ---------------------------
6429 /// Includes read-none, argmemonly, inaccessiblememonly,
6430 /// inaccessiblememorargmemonly
6431 /// ----------------------------------------------------------------------------
6432 
6433 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6434     AAMemoryLocation::MemoryLocationsKind MLK) {
6435   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6436     return "all memory";
6437   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6438     return "no memory";
6439   std::string S = "memory:";
6440   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6441     S += "stack,";
6442   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6443     S += "constant,";
6444   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6445     S += "internal global,";
6446   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6447     S += "external global,";
6448   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6449     S += "argument,";
6450   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6451     S += "inaccessible,";
6452   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6453     S += "malloced,";
6454   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6455     S += "unknown,";
6456   S.pop_back();
6457   return S;
6458 }
6459 
6460 namespace {
6461 struct AAMemoryLocationImpl : public AAMemoryLocation {
6462 
6463   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6464       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6465     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6466       AccessKind2Accesses[u] = nullptr;
6467   }
6468 
6469   ~AAMemoryLocationImpl() {
6470     // The AccessSets are allocated via a BumpPtrAllocator, we call
6471     // the destructor manually.
6472     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6473       if (AccessKind2Accesses[u])
6474         AccessKind2Accesses[u]->~AccessSet();
6475   }
6476 
6477   /// See AbstractAttribute::initialize(...).
6478   void initialize(Attributor &A) override {
6479     intersectAssumedBits(BEST_STATE);
6480     getKnownStateFromValue(A, getIRPosition(), getState());
6481     AAMemoryLocation::initialize(A);
6482   }
6483 
6484   /// Return the memory behavior information encoded in the IR for \p IRP.
6485   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6486                                      BitIntegerState &State,
6487                                      bool IgnoreSubsumingPositions = false) {
6488     // For internal functions we ignore `argmemonly` and
6489     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6490     // constant propagation. It is unclear if this is the best way but it is
6491     // unlikely this will cause real performance problems. If we are deriving
6492     // attributes for the anchor function we even remove the attribute in
6493     // addition to ignoring it.
6494     bool UseArgMemOnly = true;
6495     Function *AnchorFn = IRP.getAnchorScope();
6496     if (AnchorFn && A.isRunOn(*AnchorFn))
6497       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6498 
6499     SmallVector<Attribute, 2> Attrs;
6500     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6501     for (const Attribute &Attr : Attrs) {
6502       switch (Attr.getKindAsEnum()) {
6503       case Attribute::ReadNone:
6504         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6505         break;
6506       case Attribute::InaccessibleMemOnly:
6507         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6508         break;
6509       case Attribute::ArgMemOnly:
6510         if (UseArgMemOnly)
6511           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6512         else
6513           IRP.removeAttrs({Attribute::ArgMemOnly});
6514         break;
6515       case Attribute::InaccessibleMemOrArgMemOnly:
6516         if (UseArgMemOnly)
6517           State.addKnownBits(inverseLocation(
6518               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6519         else
6520           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6521         break;
6522       default:
6523         llvm_unreachable("Unexpected attribute!");
6524       }
6525     }
6526   }
6527 
6528   /// See AbstractAttribute::getDeducedAttributes(...).
6529   void getDeducedAttributes(LLVMContext &Ctx,
6530                             SmallVectorImpl<Attribute> &Attrs) const override {
6531     assert(Attrs.size() == 0);
6532     if (isAssumedReadNone()) {
6533       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6534     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6535       if (isAssumedInaccessibleMemOnly())
6536         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6537       else if (isAssumedArgMemOnly())
6538         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6539       else if (isAssumedInaccessibleOrArgMemOnly())
6540         Attrs.push_back(
6541             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6542     }
6543     assert(Attrs.size() <= 1);
6544   }
6545 
6546   /// See AbstractAttribute::manifest(...).
6547   ChangeStatus manifest(Attributor &A) override {
6548     const IRPosition &IRP = getIRPosition();
6549 
6550     // Check if we would improve the existing attributes first.
6551     SmallVector<Attribute, 4> DeducedAttrs;
6552     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6553     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6554           return IRP.hasAttr(Attr.getKindAsEnum(),
6555                              /* IgnoreSubsumingPositions */ true);
6556         }))
6557       return ChangeStatus::UNCHANGED;
6558 
6559     // Clear existing attributes.
6560     IRP.removeAttrs(AttrKinds);
6561     if (isAssumedReadNone())
6562       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6563 
6564     // Use the generic manifest method.
6565     return IRAttribute::manifest(A);
6566   }
6567 
6568   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6569   bool checkForAllAccessesToMemoryKind(
6570       function_ref<bool(const Instruction *, const Value *, AccessKind,
6571                         MemoryLocationsKind)>
6572           Pred,
6573       MemoryLocationsKind RequestedMLK) const override {
6574     if (!isValidState())
6575       return false;
6576 
6577     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6578     if (AssumedMLK == NO_LOCATIONS)
6579       return true;
6580 
6581     unsigned Idx = 0;
6582     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6583          CurMLK *= 2, ++Idx) {
6584       if (CurMLK & RequestedMLK)
6585         continue;
6586 
6587       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6588         for (const AccessInfo &AI : *Accesses)
6589           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6590             return false;
6591     }
6592 
6593     return true;
6594   }
6595 
6596   ChangeStatus indicatePessimisticFixpoint() override {
6597     // If we give up and indicate a pessimistic fixpoint this instruction will
6598     // become an access for all potential access kinds:
6599     // TODO: Add pointers for argmemonly and globals to improve the results of
6600     //       checkForAllAccessesToMemoryKind.
6601     bool Changed = false;
6602     MemoryLocationsKind KnownMLK = getKnown();
6603     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6604     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6605       if (!(CurMLK & KnownMLK))
6606         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6607                                   getAccessKindFromInst(I));
6608     return AAMemoryLocation::indicatePessimisticFixpoint();
6609   }
6610 
6611 protected:
6612   /// Helper struct to tie together an instruction that has a read or write
6613   /// effect with the pointer it accesses (if any).
6614   struct AccessInfo {
6615 
6616     /// The instruction that caused the access.
6617     const Instruction *I;
6618 
6619     /// The base pointer that is accessed, or null if unknown.
6620     const Value *Ptr;
6621 
6622     /// The kind of access (read/write/read+write).
6623     AccessKind Kind;
6624 
6625     bool operator==(const AccessInfo &RHS) const {
6626       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6627     }
6628     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6629       if (LHS.I != RHS.I)
6630         return LHS.I < RHS.I;
6631       if (LHS.Ptr != RHS.Ptr)
6632         return LHS.Ptr < RHS.Ptr;
6633       if (LHS.Kind != RHS.Kind)
6634         return LHS.Kind < RHS.Kind;
6635       return false;
6636     }
6637   };
6638 
6639   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6640   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6641   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6642   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6643 
6644   /// Categorize the pointer arguments of CB that might access memory in
6645   /// AccessedLoc and update the state and access map accordingly.
6646   void
6647   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6648                                      AAMemoryLocation::StateType &AccessedLocs,
6649                                      bool &Changed);
6650 
6651   /// Return the kind(s) of location that may be accessed by \p V.
6652   AAMemoryLocation::MemoryLocationsKind
6653   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6654 
6655   /// Return the access kind as determined by \p I.
6656   AccessKind getAccessKindFromInst(const Instruction *I) {
6657     AccessKind AK = READ_WRITE;
6658     if (I) {
6659       AK = I->mayReadFromMemory() ? READ : NONE;
6660       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6661     }
6662     return AK;
6663   }
6664 
6665   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6666   /// an access of kind \p AK to a \p MLK memory location with the access
6667   /// pointer \p Ptr.
6668   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6669                                  MemoryLocationsKind MLK, const Instruction *I,
6670                                  const Value *Ptr, bool &Changed,
6671                                  AccessKind AK = READ_WRITE) {
6672 
6673     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6674     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6675     if (!Accesses)
6676       Accesses = new (Allocator) AccessSet();
6677     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6678     State.removeAssumedBits(MLK);
6679   }
6680 
6681   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6682   /// arguments, and update the state and access map accordingly.
6683   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6684                           AAMemoryLocation::StateType &State, bool &Changed);
6685 
6686   /// Used to allocate access sets.
6687   BumpPtrAllocator &Allocator;
6688 
6689   /// The set of IR attributes AAMemoryLocation deals with.
6690   static const Attribute::AttrKind AttrKinds[4];
6691 };
6692 
6693 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6694     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6695     Attribute::InaccessibleMemOrArgMemOnly};
6696 
6697 void AAMemoryLocationImpl::categorizePtrValue(
6698     Attributor &A, const Instruction &I, const Value &Ptr,
6699     AAMemoryLocation::StateType &State, bool &Changed) {
6700   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6701                     << Ptr << " ["
6702                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6703 
6704   auto StripGEPCB = [](Value *V) -> Value * {
6705     auto *GEP = dyn_cast<GEPOperator>(V);
6706     while (GEP) {
6707       V = GEP->getPointerOperand();
6708       GEP = dyn_cast<GEPOperator>(V);
6709     }
6710     return V;
6711   };
6712 
6713   auto VisitValueCB = [&](Value &V, const Instruction *,
6714                           AAMemoryLocation::StateType &T,
6715                           bool Stripped) -> bool {
6716     // TODO: recognize the TBAA used for constant accesses.
6717     MemoryLocationsKind MLK = NO_LOCATIONS;
6718     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6719     if (isa<UndefValue>(V))
6720       return true;
6721     if (auto *Arg = dyn_cast<Argument>(&V)) {
6722       if (Arg->hasByValAttr())
6723         MLK = NO_LOCAL_MEM;
6724       else
6725         MLK = NO_ARGUMENT_MEM;
6726     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6727       // Reading constant memory is not treated as a read "effect" by the
6728       // function attr pass so we won't neither. Constants defined by TBAA are
6729       // similar. (We know we do not write it because it is constant.)
6730       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6731         if (GVar->isConstant())
6732           return true;
6733 
6734       if (GV->hasLocalLinkage())
6735         MLK = NO_GLOBAL_INTERNAL_MEM;
6736       else
6737         MLK = NO_GLOBAL_EXTERNAL_MEM;
6738     } else if (isa<ConstantPointerNull>(V) &&
6739                !NullPointerIsDefined(getAssociatedFunction(),
6740                                      V.getType()->getPointerAddressSpace())) {
6741       return true;
6742     } else if (isa<AllocaInst>(V)) {
6743       MLK = NO_LOCAL_MEM;
6744     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6745       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6746           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6747       if (NoAliasAA.isAssumedNoAlias())
6748         MLK = NO_MALLOCED_MEM;
6749       else
6750         MLK = NO_UNKOWN_MEM;
6751     } else {
6752       MLK = NO_UNKOWN_MEM;
6753     }
6754 
6755     assert(MLK != NO_LOCATIONS && "No location specified!");
6756     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6757                               getAccessKindFromInst(&I));
6758     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6759                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6760                       << "\n");
6761     return true;
6762   };
6763 
6764   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6765           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6766           /* UseValueSimplify */ true,
6767           /* MaxValues */ 32, StripGEPCB)) {
6768     LLVM_DEBUG(
6769         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6770     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6771                               getAccessKindFromInst(&I));
6772   } else {
6773     LLVM_DEBUG(
6774         dbgs()
6775         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6776         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6777   }
6778 }
6779 
6780 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6781     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6782     bool &Changed) {
6783   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6784 
6785     // Skip non-pointer arguments.
6786     const Value *ArgOp = CB.getArgOperand(ArgNo);
6787     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6788       continue;
6789 
6790     // Skip readnone arguments.
6791     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6792     const auto &ArgOpMemLocationAA =
6793         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6794 
6795     if (ArgOpMemLocationAA.isAssumedReadNone())
6796       continue;
6797 
6798     // Categorize potentially accessed pointer arguments as if there was an
6799     // access instruction with them as pointer.
6800     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6801   }
6802 }
6803 
6804 AAMemoryLocation::MemoryLocationsKind
6805 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6806                                                   bool &Changed) {
6807   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6808                     << I << "\n");
6809 
6810   AAMemoryLocation::StateType AccessedLocs;
6811   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6812 
6813   if (auto *CB = dyn_cast<CallBase>(&I)) {
6814 
6815     // First check if we assume any memory is access is visible.
6816     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6817         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6818     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6819                       << " [" << CBMemLocationAA << "]\n");
6820 
6821     if (CBMemLocationAA.isAssumedReadNone())
6822       return NO_LOCATIONS;
6823 
6824     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6825       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6826                                 Changed, getAccessKindFromInst(&I));
6827       return AccessedLocs.getAssumed();
6828     }
6829 
6830     uint32_t CBAssumedNotAccessedLocs =
6831         CBMemLocationAA.getAssumedNotAccessedLocation();
6832 
6833     // Set the argmemonly and global bit as we handle them separately below.
6834     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6835         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6836 
6837     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6838       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6839         continue;
6840       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6841                                 getAccessKindFromInst(&I));
6842     }
6843 
6844     // Now handle global memory if it might be accessed. This is slightly tricky
6845     // as NO_GLOBAL_MEM has multiple bits set.
6846     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6847     if (HasGlobalAccesses) {
6848       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6849                             AccessKind Kind, MemoryLocationsKind MLK) {
6850         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6851                                   getAccessKindFromInst(&I));
6852         return true;
6853       };
6854       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6855               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6856         return AccessedLocs.getWorstState();
6857     }
6858 
6859     LLVM_DEBUG(
6860         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6861                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6862 
6863     // Now handle argument memory if it might be accessed.
6864     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6865     if (HasArgAccesses)
6866       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6867 
6868     LLVM_DEBUG(
6869         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6870                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6871 
6872     return AccessedLocs.getAssumed();
6873   }
6874 
6875   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6876     LLVM_DEBUG(
6877         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6878                << I << " [" << *Ptr << "]\n");
6879     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6880     return AccessedLocs.getAssumed();
6881   }
6882 
6883   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6884                     << I << "\n");
6885   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6886                             getAccessKindFromInst(&I));
6887   return AccessedLocs.getAssumed();
6888 }
6889 
6890 /// An AA to represent the memory behavior function attributes.
6891 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6892   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6893       : AAMemoryLocationImpl(IRP, A) {}
6894 
6895   /// See AbstractAttribute::updateImpl(Attributor &A).
6896   virtual ChangeStatus updateImpl(Attributor &A) override {
6897 
6898     const auto &MemBehaviorAA =
6899         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6900     if (MemBehaviorAA.isAssumedReadNone()) {
6901       if (MemBehaviorAA.isKnownReadNone())
6902         return indicateOptimisticFixpoint();
6903       assert(isAssumedReadNone() &&
6904              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6905       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6906       return ChangeStatus::UNCHANGED;
6907     }
6908 
6909     // The current assumed state used to determine a change.
6910     auto AssumedState = getAssumed();
6911     bool Changed = false;
6912 
6913     auto CheckRWInst = [&](Instruction &I) {
6914       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6915       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6916                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6917       removeAssumedBits(inverseLocation(MLK, false, false));
6918       // Stop once only the valid bit set in the *not assumed location*, thus
6919       // once we don't actually exclude any memory locations in the state.
6920       return getAssumedNotAccessedLocation() != VALID_STATE;
6921     };
6922 
6923     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6924       return indicatePessimisticFixpoint();
6925 
6926     Changed |= AssumedState != getAssumed();
6927     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6928   }
6929 
6930   /// See AbstractAttribute::trackStatistics()
6931   void trackStatistics() const override {
6932     if (isAssumedReadNone())
6933       STATS_DECLTRACK_FN_ATTR(readnone)
6934     else if (isAssumedArgMemOnly())
6935       STATS_DECLTRACK_FN_ATTR(argmemonly)
6936     else if (isAssumedInaccessibleMemOnly())
6937       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6938     else if (isAssumedInaccessibleOrArgMemOnly())
6939       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6940   }
6941 };
6942 
6943 /// AAMemoryLocation attribute for call sites.
6944 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6945   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6946       : AAMemoryLocationImpl(IRP, A) {}
6947 
6948   /// See AbstractAttribute::initialize(...).
6949   void initialize(Attributor &A) override {
6950     AAMemoryLocationImpl::initialize(A);
6951     Function *F = getAssociatedFunction();
6952     if (!F || F->isDeclaration())
6953       indicatePessimisticFixpoint();
6954   }
6955 
6956   /// See AbstractAttribute::updateImpl(...).
6957   ChangeStatus updateImpl(Attributor &A) override {
6958     // TODO: Once we have call site specific value information we can provide
6959     //       call site specific liveness liveness information and then it makes
6960     //       sense to specialize attributes for call sites arguments instead of
6961     //       redirecting requests to the callee argument.
6962     Function *F = getAssociatedFunction();
6963     const IRPosition &FnPos = IRPosition::function(*F);
6964     auto &FnAA =
6965         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
6966     bool Changed = false;
6967     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6968                           AccessKind Kind, MemoryLocationsKind MLK) {
6969       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6970                                 getAccessKindFromInst(I));
6971       return true;
6972     };
6973     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6974       return indicatePessimisticFixpoint();
6975     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6976   }
6977 
6978   /// See AbstractAttribute::trackStatistics()
6979   void trackStatistics() const override {
6980     if (isAssumedReadNone())
6981       STATS_DECLTRACK_CS_ATTR(readnone)
6982   }
6983 };
6984 
6985 /// ------------------ Value Constant Range Attribute -------------------------
6986 
6987 struct AAValueConstantRangeImpl : AAValueConstantRange {
6988   using StateType = IntegerRangeState;
6989   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6990       : AAValueConstantRange(IRP, A) {}
6991 
6992   /// See AbstractAttribute::getAsStr().
6993   const std::string getAsStr() const override {
6994     std::string Str;
6995     llvm::raw_string_ostream OS(Str);
6996     OS << "range(" << getBitWidth() << ")<";
6997     getKnown().print(OS);
6998     OS << " / ";
6999     getAssumed().print(OS);
7000     OS << ">";
7001     return OS.str();
7002   }
7003 
7004   /// Helper function to get a SCEV expr for the associated value at program
7005   /// point \p I.
7006   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7007     if (!getAnchorScope())
7008       return nullptr;
7009 
7010     ScalarEvolution *SE =
7011         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7012             *getAnchorScope());
7013 
7014     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7015         *getAnchorScope());
7016 
7017     if (!SE || !LI)
7018       return nullptr;
7019 
7020     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7021     if (!I)
7022       return S;
7023 
7024     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7025   }
7026 
7027   /// Helper function to get a range from SCEV for the associated value at
7028   /// program point \p I.
7029   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7030                                          const Instruction *I = nullptr) const {
7031     if (!getAnchorScope())
7032       return getWorstState(getBitWidth());
7033 
7034     ScalarEvolution *SE =
7035         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7036             *getAnchorScope());
7037 
7038     const SCEV *S = getSCEV(A, I);
7039     if (!SE || !S)
7040       return getWorstState(getBitWidth());
7041 
7042     return SE->getUnsignedRange(S);
7043   }
7044 
7045   /// Helper function to get a range from LVI for the associated value at
7046   /// program point \p I.
7047   ConstantRange
7048   getConstantRangeFromLVI(Attributor &A,
7049                           const Instruction *CtxI = nullptr) const {
7050     if (!getAnchorScope())
7051       return getWorstState(getBitWidth());
7052 
7053     LazyValueInfo *LVI =
7054         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7055             *getAnchorScope());
7056 
7057     if (!LVI || !CtxI)
7058       return getWorstState(getBitWidth());
7059     return LVI->getConstantRange(&getAssociatedValue(),
7060                                  const_cast<Instruction *>(CtxI));
7061   }
7062 
7063   /// See AAValueConstantRange::getKnownConstantRange(..).
7064   ConstantRange
7065   getKnownConstantRange(Attributor &A,
7066                         const Instruction *CtxI = nullptr) const override {
7067     if (!CtxI || CtxI == getCtxI())
7068       return getKnown();
7069 
7070     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7071     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7072     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7073   }
7074 
7075   /// See AAValueConstantRange::getAssumedConstantRange(..).
7076   ConstantRange
7077   getAssumedConstantRange(Attributor &A,
7078                           const Instruction *CtxI = nullptr) const override {
7079     // TODO: Make SCEV use Attributor assumption.
7080     //       We may be able to bound a variable range via assumptions in
7081     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
7082     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7083 
7084     if (!CtxI || CtxI == getCtxI())
7085       return getAssumed();
7086 
7087     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7088     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7089     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7090   }
7091 
7092   /// See AbstractAttribute::initialize(..).
7093   void initialize(Attributor &A) override {
7094     // Intersect a range given by SCEV.
7095     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7096 
7097     // Intersect a range given by LVI.
7098     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7099   }
7100 
7101   /// Helper function to create MDNode for range metadata.
7102   static MDNode *
7103   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7104                             const ConstantRange &AssumedConstantRange) {
7105     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7106                                   Ty, AssumedConstantRange.getLower())),
7107                               ConstantAsMetadata::get(ConstantInt::get(
7108                                   Ty, AssumedConstantRange.getUpper()))};
7109     return MDNode::get(Ctx, LowAndHigh);
7110   }
7111 
7112   /// Return true if \p Assumed is included in \p KnownRanges.
7113   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7114 
7115     if (Assumed.isFullSet())
7116       return false;
7117 
7118     if (!KnownRanges)
7119       return true;
7120 
7121     // If multiple ranges are annotated in IR, we give up to annotate assumed
7122     // range for now.
7123 
7124     // TODO:  If there exists a known range which containts assumed range, we
7125     // can say assumed range is better.
7126     if (KnownRanges->getNumOperands() > 2)
7127       return false;
7128 
7129     ConstantInt *Lower =
7130         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7131     ConstantInt *Upper =
7132         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7133 
7134     ConstantRange Known(Lower->getValue(), Upper->getValue());
7135     return Known.contains(Assumed) && Known != Assumed;
7136   }
7137 
7138   /// Helper function to set range metadata.
7139   static bool
7140   setRangeMetadataIfisBetterRange(Instruction *I,
7141                                   const ConstantRange &AssumedConstantRange) {
7142     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7143     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7144       if (!AssumedConstantRange.isEmptySet()) {
7145         I->setMetadata(LLVMContext::MD_range,
7146                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7147                                                  AssumedConstantRange));
7148         return true;
7149       }
7150     }
7151     return false;
7152   }
7153 
7154   /// See AbstractAttribute::manifest()
7155   ChangeStatus manifest(Attributor &A) override {
7156     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7157     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7158     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7159 
7160     auto &V = getAssociatedValue();
7161     if (!AssumedConstantRange.isEmptySet() &&
7162         !AssumedConstantRange.isSingleElement()) {
7163       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7164         assert(I == getCtxI() && "Should not annotate an instruction which is "
7165                                  "not the context instruction");
7166         if (isa<CallInst>(I) || isa<LoadInst>(I))
7167           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7168             Changed = ChangeStatus::CHANGED;
7169       }
7170     }
7171 
7172     return Changed;
7173   }
7174 };
7175 
7176 struct AAValueConstantRangeArgument final
7177     : AAArgumentFromCallSiteArguments<
7178           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7179           true /* BridgeCallBaseContext */> {
7180   using Base = AAArgumentFromCallSiteArguments<
7181       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7182       true /* BridgeCallBaseContext */>;
7183   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7184       : Base(IRP, A) {}
7185 
7186   /// See AbstractAttribute::initialize(..).
7187   void initialize(Attributor &A) override {
7188     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7189       indicatePessimisticFixpoint();
7190     } else {
7191       Base::initialize(A);
7192     }
7193   }
7194 
7195   /// See AbstractAttribute::trackStatistics()
7196   void trackStatistics() const override {
7197     STATS_DECLTRACK_ARG_ATTR(value_range)
7198   }
7199 };
7200 
7201 struct AAValueConstantRangeReturned
7202     : AAReturnedFromReturnedValues<AAValueConstantRange,
7203                                    AAValueConstantRangeImpl,
7204                                    AAValueConstantRangeImpl::StateType,
7205                                    /* PropogateCallBaseContext */ true> {
7206   using Base =
7207       AAReturnedFromReturnedValues<AAValueConstantRange,
7208                                    AAValueConstantRangeImpl,
7209                                    AAValueConstantRangeImpl::StateType,
7210                                    /* PropogateCallBaseContext */ true>;
7211   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7212       : Base(IRP, A) {}
7213 
7214   /// See AbstractAttribute::initialize(...).
7215   void initialize(Attributor &A) override {}
7216 
7217   /// See AbstractAttribute::trackStatistics()
7218   void trackStatistics() const override {
7219     STATS_DECLTRACK_FNRET_ATTR(value_range)
7220   }
7221 };
7222 
7223 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7224   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7225       : AAValueConstantRangeImpl(IRP, A) {}
7226 
7227   /// See AbstractAttribute::initialize(...).
7228   void initialize(Attributor &A) override {
7229     AAValueConstantRangeImpl::initialize(A);
7230     Value &V = getAssociatedValue();
7231 
7232     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7233       unionAssumed(ConstantRange(C->getValue()));
7234       indicateOptimisticFixpoint();
7235       return;
7236     }
7237 
7238     if (isa<UndefValue>(&V)) {
7239       // Collapse the undef state to 0.
7240       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7241       indicateOptimisticFixpoint();
7242       return;
7243     }
7244 
7245     if (isa<CallBase>(&V))
7246       return;
7247 
7248     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7249       return;
7250     // If it is a load instruction with range metadata, use it.
7251     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7252       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7253         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7254         return;
7255       }
7256 
7257     // We can work with PHI and select instruction as we traverse their operands
7258     // during update.
7259     if (isa<SelectInst>(V) || isa<PHINode>(V))
7260       return;
7261 
7262     // Otherwise we give up.
7263     indicatePessimisticFixpoint();
7264 
7265     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7266                       << getAssociatedValue() << "\n");
7267   }
7268 
7269   bool calculateBinaryOperator(
7270       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7271       const Instruction *CtxI,
7272       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7273     Value *LHS = BinOp->getOperand(0);
7274     Value *RHS = BinOp->getOperand(1);
7275     // TODO: Allow non integers as well.
7276     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7277       return false;
7278 
7279     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7280         *this, IRPosition::value(*LHS, getCallBaseContext()),
7281         DepClassTy::REQUIRED);
7282     QuerriedAAs.push_back(&LHSAA);
7283     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7284 
7285     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7286         *this, IRPosition::value(*RHS, getCallBaseContext()),
7287         DepClassTy::REQUIRED);
7288     QuerriedAAs.push_back(&RHSAA);
7289     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7290 
7291     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7292 
7293     T.unionAssumed(AssumedRange);
7294 
7295     // TODO: Track a known state too.
7296 
7297     return T.isValidState();
7298   }
7299 
7300   bool calculateCastInst(
7301       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7302       const Instruction *CtxI,
7303       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7304     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7305     // TODO: Allow non integers as well.
7306     Value &OpV = *CastI->getOperand(0);
7307     if (!OpV.getType()->isIntegerTy())
7308       return false;
7309 
7310     auto &OpAA = A.getAAFor<AAValueConstantRange>(
7311         *this, IRPosition::value(OpV, getCallBaseContext()),
7312         DepClassTy::REQUIRED);
7313     QuerriedAAs.push_back(&OpAA);
7314     T.unionAssumed(
7315         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7316     return T.isValidState();
7317   }
7318 
7319   bool
7320   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7321                    const Instruction *CtxI,
7322                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7323     Value *LHS = CmpI->getOperand(0);
7324     Value *RHS = CmpI->getOperand(1);
7325     // TODO: Allow non integers as well.
7326     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7327       return false;
7328 
7329     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7330         *this, IRPosition::value(*LHS, getCallBaseContext()),
7331         DepClassTy::REQUIRED);
7332     QuerriedAAs.push_back(&LHSAA);
7333     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7334         *this, IRPosition::value(*RHS, getCallBaseContext()),
7335         DepClassTy::REQUIRED);
7336     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7337     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7338 
7339     // If one of them is empty set, we can't decide.
7340     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7341       return true;
7342 
7343     bool MustTrue = false, MustFalse = false;
7344 
7345     auto AllowedRegion =
7346         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7347 
7348     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7349       MustFalse = true;
7350 
7351     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
7352       MustTrue = true;
7353 
7354     assert((!MustTrue || !MustFalse) &&
7355            "Either MustTrue or MustFalse should be false!");
7356 
7357     if (MustTrue)
7358       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7359     else if (MustFalse)
7360       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7361     else
7362       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7363 
7364     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7365                       << " " << RHSAA << "\n");
7366 
7367     // TODO: Track a known state too.
7368     return T.isValidState();
7369   }
7370 
7371   /// See AbstractAttribute::updateImpl(...).
7372   ChangeStatus updateImpl(Attributor &A) override {
7373     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7374                             IntegerRangeState &T, bool Stripped) -> bool {
7375       Instruction *I = dyn_cast<Instruction>(&V);
7376       if (!I || isa<CallBase>(I)) {
7377 
7378         // If the value is not instruction, we query AA to Attributor.
7379         const auto &AA = A.getAAFor<AAValueConstantRange>(
7380             *this, IRPosition::value(V, getCallBaseContext()),
7381             DepClassTy::REQUIRED);
7382 
7383         // Clamp operator is not used to utilize a program point CtxI.
7384         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7385 
7386         return T.isValidState();
7387       }
7388 
7389       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7390       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7391         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7392           return false;
7393       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7394         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7395           return false;
7396       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7397         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7398           return false;
7399       } else {
7400         // Give up with other instructions.
7401         // TODO: Add other instructions
7402 
7403         T.indicatePessimisticFixpoint();
7404         return false;
7405       }
7406 
7407       // Catch circular reasoning in a pessimistic way for now.
7408       // TODO: Check how the range evolves and if we stripped anything, see also
7409       //       AADereferenceable or AAAlign for similar situations.
7410       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7411         if (QueriedAA != this)
7412           continue;
7413         // If we are in a stady state we do not need to worry.
7414         if (T.getAssumed() == getState().getAssumed())
7415           continue;
7416         T.indicatePessimisticFixpoint();
7417       }
7418 
7419       return T.isValidState();
7420     };
7421 
7422     IntegerRangeState T(getBitWidth());
7423 
7424     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7425             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7426             /* UseValueSimplify */ false))
7427       return indicatePessimisticFixpoint();
7428 
7429     return clampStateAndIndicateChange(getState(), T);
7430   }
7431 
7432   /// See AbstractAttribute::trackStatistics()
7433   void trackStatistics() const override {
7434     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7435   }
7436 };
7437 
7438 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7439   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7440       : AAValueConstantRangeImpl(IRP, A) {}
7441 
7442   /// See AbstractAttribute::initialize(...).
7443   ChangeStatus updateImpl(Attributor &A) override {
7444     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7445                      "not be called");
7446   }
7447 
7448   /// See AbstractAttribute::trackStatistics()
7449   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7450 };
7451 
7452 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7453   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7454       : AAValueConstantRangeFunction(IRP, A) {}
7455 
7456   /// See AbstractAttribute::trackStatistics()
7457   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7458 };
7459 
7460 struct AAValueConstantRangeCallSiteReturned
7461     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7462                                      AAValueConstantRangeImpl,
7463                                      AAValueConstantRangeImpl::StateType,
7464                                      /* IntroduceCallBaseContext */ true> {
7465   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7466       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7467                                        AAValueConstantRangeImpl,
7468                                        AAValueConstantRangeImpl::StateType,
7469                                        /* IntroduceCallBaseContext */ true>(IRP,
7470                                                                             A) {
7471   }
7472 
7473   /// See AbstractAttribute::initialize(...).
7474   void initialize(Attributor &A) override {
7475     // If it is a load instruction with range metadata, use the metadata.
7476     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7477       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7478         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7479 
7480     AAValueConstantRangeImpl::initialize(A);
7481   }
7482 
7483   /// See AbstractAttribute::trackStatistics()
7484   void trackStatistics() const override {
7485     STATS_DECLTRACK_CSRET_ATTR(value_range)
7486   }
7487 };
7488 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7489   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7490       : AAValueConstantRangeFloating(IRP, A) {}
7491 
7492   /// See AbstractAttribute::manifest()
7493   ChangeStatus manifest(Attributor &A) override {
7494     return ChangeStatus::UNCHANGED;
7495   }
7496 
7497   /// See AbstractAttribute::trackStatistics()
7498   void trackStatistics() const override {
7499     STATS_DECLTRACK_CSARG_ATTR(value_range)
7500   }
7501 };
7502 
7503 /// ------------------ Potential Values Attribute -------------------------
7504 
7505 struct AAPotentialValuesImpl : AAPotentialValues {
7506   using StateType = PotentialConstantIntValuesState;
7507 
7508   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7509       : AAPotentialValues(IRP, A) {}
7510 
7511   /// See AbstractAttribute::getAsStr().
7512   const std::string getAsStr() const override {
7513     std::string Str;
7514     llvm::raw_string_ostream OS(Str);
7515     OS << getState();
7516     return OS.str();
7517   }
7518 
7519   /// See AbstractAttribute::updateImpl(...).
7520   ChangeStatus updateImpl(Attributor &A) override {
7521     return indicatePessimisticFixpoint();
7522   }
7523 };
7524 
7525 struct AAPotentialValuesArgument final
7526     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7527                                       PotentialConstantIntValuesState> {
7528   using Base =
7529       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7530                                       PotentialConstantIntValuesState>;
7531   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7532       : Base(IRP, A) {}
7533 
7534   /// See AbstractAttribute::initialize(..).
7535   void initialize(Attributor &A) override {
7536     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7537       indicatePessimisticFixpoint();
7538     } else {
7539       Base::initialize(A);
7540     }
7541   }
7542 
7543   /// See AbstractAttribute::trackStatistics()
7544   void trackStatistics() const override {
7545     STATS_DECLTRACK_ARG_ATTR(potential_values)
7546   }
7547 };
7548 
7549 struct AAPotentialValuesReturned
7550     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7551   using Base =
7552       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7553   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7554       : Base(IRP, A) {}
7555 
7556   /// See AbstractAttribute::trackStatistics()
7557   void trackStatistics() const override {
7558     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7559   }
7560 };
7561 
7562 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7563   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7564       : AAPotentialValuesImpl(IRP, A) {}
7565 
7566   /// See AbstractAttribute::initialize(..).
7567   void initialize(Attributor &A) override {
7568     Value &V = getAssociatedValue();
7569 
7570     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7571       unionAssumed(C->getValue());
7572       indicateOptimisticFixpoint();
7573       return;
7574     }
7575 
7576     if (isa<UndefValue>(&V)) {
7577       unionAssumedWithUndef();
7578       indicateOptimisticFixpoint();
7579       return;
7580     }
7581 
7582     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7583       return;
7584 
7585     if (isa<SelectInst>(V) || isa<PHINode>(V))
7586       return;
7587 
7588     indicatePessimisticFixpoint();
7589 
7590     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7591                       << getAssociatedValue() << "\n");
7592   }
7593 
7594   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7595                                 const APInt &RHS) {
7596     ICmpInst::Predicate Pred = ICI->getPredicate();
7597     switch (Pred) {
7598     case ICmpInst::ICMP_UGT:
7599       return LHS.ugt(RHS);
7600     case ICmpInst::ICMP_SGT:
7601       return LHS.sgt(RHS);
7602     case ICmpInst::ICMP_EQ:
7603       return LHS.eq(RHS);
7604     case ICmpInst::ICMP_UGE:
7605       return LHS.uge(RHS);
7606     case ICmpInst::ICMP_SGE:
7607       return LHS.sge(RHS);
7608     case ICmpInst::ICMP_ULT:
7609       return LHS.ult(RHS);
7610     case ICmpInst::ICMP_SLT:
7611       return LHS.slt(RHS);
7612     case ICmpInst::ICMP_NE:
7613       return LHS.ne(RHS);
7614     case ICmpInst::ICMP_ULE:
7615       return LHS.ule(RHS);
7616     case ICmpInst::ICMP_SLE:
7617       return LHS.sle(RHS);
7618     default:
7619       llvm_unreachable("Invalid ICmp predicate!");
7620     }
7621   }
7622 
7623   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7624                                  uint32_t ResultBitWidth) {
7625     Instruction::CastOps CastOp = CI->getOpcode();
7626     switch (CastOp) {
7627     default:
7628       llvm_unreachable("unsupported or not integer cast");
7629     case Instruction::Trunc:
7630       return Src.trunc(ResultBitWidth);
7631     case Instruction::SExt:
7632       return Src.sext(ResultBitWidth);
7633     case Instruction::ZExt:
7634       return Src.zext(ResultBitWidth);
7635     case Instruction::BitCast:
7636       return Src;
7637     }
7638   }
7639 
7640   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7641                                        const APInt &LHS, const APInt &RHS,
7642                                        bool &SkipOperation, bool &Unsupported) {
7643     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7644     // Unsupported is set to true when the binary operator is not supported.
7645     // SkipOperation is set to true when UB occur with the given operand pair
7646     // (LHS, RHS).
7647     // TODO: we should look at nsw and nuw keywords to handle operations
7648     //       that create poison or undef value.
7649     switch (BinOpcode) {
7650     default:
7651       Unsupported = true;
7652       return LHS;
7653     case Instruction::Add:
7654       return LHS + RHS;
7655     case Instruction::Sub:
7656       return LHS - RHS;
7657     case Instruction::Mul:
7658       return LHS * RHS;
7659     case Instruction::UDiv:
7660       if (RHS.isNullValue()) {
7661         SkipOperation = true;
7662         return LHS;
7663       }
7664       return LHS.udiv(RHS);
7665     case Instruction::SDiv:
7666       if (RHS.isNullValue()) {
7667         SkipOperation = true;
7668         return LHS;
7669       }
7670       return LHS.sdiv(RHS);
7671     case Instruction::URem:
7672       if (RHS.isNullValue()) {
7673         SkipOperation = true;
7674         return LHS;
7675       }
7676       return LHS.urem(RHS);
7677     case Instruction::SRem:
7678       if (RHS.isNullValue()) {
7679         SkipOperation = true;
7680         return LHS;
7681       }
7682       return LHS.srem(RHS);
7683     case Instruction::Shl:
7684       return LHS.shl(RHS);
7685     case Instruction::LShr:
7686       return LHS.lshr(RHS);
7687     case Instruction::AShr:
7688       return LHS.ashr(RHS);
7689     case Instruction::And:
7690       return LHS & RHS;
7691     case Instruction::Or:
7692       return LHS | RHS;
7693     case Instruction::Xor:
7694       return LHS ^ RHS;
7695     }
7696   }
7697 
7698   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7699                                            const APInt &LHS, const APInt &RHS) {
7700     bool SkipOperation = false;
7701     bool Unsupported = false;
7702     APInt Result =
7703         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7704     if (Unsupported)
7705       return false;
7706     // If SkipOperation is true, we can ignore this operand pair (L, R).
7707     if (!SkipOperation)
7708       unionAssumed(Result);
7709     return isValidState();
7710   }
7711 
7712   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7713     auto AssumedBefore = getAssumed();
7714     Value *LHS = ICI->getOperand(0);
7715     Value *RHS = ICI->getOperand(1);
7716     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7717       return indicatePessimisticFixpoint();
7718 
7719     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7720                                                 DepClassTy::REQUIRED);
7721     if (!LHSAA.isValidState())
7722       return indicatePessimisticFixpoint();
7723 
7724     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7725                                                 DepClassTy::REQUIRED);
7726     if (!RHSAA.isValidState())
7727       return indicatePessimisticFixpoint();
7728 
7729     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7730     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7731 
7732     // TODO: make use of undef flag to limit potential values aggressively.
7733     bool MaybeTrue = false, MaybeFalse = false;
7734     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7735     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7736       // The result of any comparison between undefs can be soundly replaced
7737       // with undef.
7738       unionAssumedWithUndef();
7739     } else if (LHSAA.undefIsContained()) {
7740       for (const APInt &R : RHSAAPVS) {
7741         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7742         MaybeTrue |= CmpResult;
7743         MaybeFalse |= !CmpResult;
7744         if (MaybeTrue & MaybeFalse)
7745           return indicatePessimisticFixpoint();
7746       }
7747     } else if (RHSAA.undefIsContained()) {
7748       for (const APInt &L : LHSAAPVS) {
7749         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7750         MaybeTrue |= CmpResult;
7751         MaybeFalse |= !CmpResult;
7752         if (MaybeTrue & MaybeFalse)
7753           return indicatePessimisticFixpoint();
7754       }
7755     } else {
7756       for (const APInt &L : LHSAAPVS) {
7757         for (const APInt &R : RHSAAPVS) {
7758           bool CmpResult = calculateICmpInst(ICI, L, R);
7759           MaybeTrue |= CmpResult;
7760           MaybeFalse |= !CmpResult;
7761           if (MaybeTrue & MaybeFalse)
7762             return indicatePessimisticFixpoint();
7763         }
7764       }
7765     }
7766     if (MaybeTrue)
7767       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7768     if (MaybeFalse)
7769       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7770     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7771                                          : ChangeStatus::CHANGED;
7772   }
7773 
7774   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7775     auto AssumedBefore = getAssumed();
7776     Value *LHS = SI->getTrueValue();
7777     Value *RHS = SI->getFalseValue();
7778     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7779       return indicatePessimisticFixpoint();
7780 
7781     // TODO: Use assumed simplified condition value
7782     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7783                                                 DepClassTy::REQUIRED);
7784     if (!LHSAA.isValidState())
7785       return indicatePessimisticFixpoint();
7786 
7787     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7788                                                 DepClassTy::REQUIRED);
7789     if (!RHSAA.isValidState())
7790       return indicatePessimisticFixpoint();
7791 
7792     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7793       // select i1 *, undef , undef => undef
7794       unionAssumedWithUndef();
7795     else {
7796       unionAssumed(LHSAA);
7797       unionAssumed(RHSAA);
7798     }
7799     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7800                                          : ChangeStatus::CHANGED;
7801   }
7802 
7803   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7804     auto AssumedBefore = getAssumed();
7805     if (!CI->isIntegerCast())
7806       return indicatePessimisticFixpoint();
7807     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7808     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7809     Value *Src = CI->getOperand(0);
7810     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7811                                                 DepClassTy::REQUIRED);
7812     if (!SrcAA.isValidState())
7813       return indicatePessimisticFixpoint();
7814     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7815     if (SrcAA.undefIsContained())
7816       unionAssumedWithUndef();
7817     else {
7818       for (const APInt &S : SrcAAPVS) {
7819         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7820         unionAssumed(T);
7821       }
7822     }
7823     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7824                                          : ChangeStatus::CHANGED;
7825   }
7826 
7827   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7828     auto AssumedBefore = getAssumed();
7829     Value *LHS = BinOp->getOperand(0);
7830     Value *RHS = BinOp->getOperand(1);
7831     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7832       return indicatePessimisticFixpoint();
7833 
7834     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7835                                                 DepClassTy::REQUIRED);
7836     if (!LHSAA.isValidState())
7837       return indicatePessimisticFixpoint();
7838 
7839     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7840                                                 DepClassTy::REQUIRED);
7841     if (!RHSAA.isValidState())
7842       return indicatePessimisticFixpoint();
7843 
7844     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7845     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7846     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7847 
7848     // TODO: make use of undef flag to limit potential values aggressively.
7849     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7850       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7851         return indicatePessimisticFixpoint();
7852     } else if (LHSAA.undefIsContained()) {
7853       for (const APInt &R : RHSAAPVS) {
7854         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7855           return indicatePessimisticFixpoint();
7856       }
7857     } else if (RHSAA.undefIsContained()) {
7858       for (const APInt &L : LHSAAPVS) {
7859         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7860           return indicatePessimisticFixpoint();
7861       }
7862     } else {
7863       for (const APInt &L : LHSAAPVS) {
7864         for (const APInt &R : RHSAAPVS) {
7865           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7866             return indicatePessimisticFixpoint();
7867         }
7868       }
7869     }
7870     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7871                                          : ChangeStatus::CHANGED;
7872   }
7873 
7874   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7875     auto AssumedBefore = getAssumed();
7876     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7877       Value *IncomingValue = PHI->getIncomingValue(u);
7878       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7879           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
7880       if (!PotentialValuesAA.isValidState())
7881         return indicatePessimisticFixpoint();
7882       if (PotentialValuesAA.undefIsContained())
7883         unionAssumedWithUndef();
7884       else
7885         unionAssumed(PotentialValuesAA.getAssumed());
7886     }
7887     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7888                                          : ChangeStatus::CHANGED;
7889   }
7890 
7891   /// See AbstractAttribute::updateImpl(...).
7892   ChangeStatus updateImpl(Attributor &A) override {
7893     Value &V = getAssociatedValue();
7894     Instruction *I = dyn_cast<Instruction>(&V);
7895 
7896     if (auto *ICI = dyn_cast<ICmpInst>(I))
7897       return updateWithICmpInst(A, ICI);
7898 
7899     if (auto *SI = dyn_cast<SelectInst>(I))
7900       return updateWithSelectInst(A, SI);
7901 
7902     if (auto *CI = dyn_cast<CastInst>(I))
7903       return updateWithCastInst(A, CI);
7904 
7905     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7906       return updateWithBinaryOperator(A, BinOp);
7907 
7908     if (auto *PHI = dyn_cast<PHINode>(I))
7909       return updateWithPHINode(A, PHI);
7910 
7911     return indicatePessimisticFixpoint();
7912   }
7913 
7914   /// See AbstractAttribute::trackStatistics()
7915   void trackStatistics() const override {
7916     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7917   }
7918 };
7919 
7920 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7921   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7922       : AAPotentialValuesImpl(IRP, A) {}
7923 
7924   /// See AbstractAttribute::initialize(...).
7925   ChangeStatus updateImpl(Attributor &A) override {
7926     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7927                      "not be called");
7928   }
7929 
7930   /// See AbstractAttribute::trackStatistics()
7931   void trackStatistics() const override {
7932     STATS_DECLTRACK_FN_ATTR(potential_values)
7933   }
7934 };
7935 
7936 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7937   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7938       : AAPotentialValuesFunction(IRP, A) {}
7939 
7940   /// See AbstractAttribute::trackStatistics()
7941   void trackStatistics() const override {
7942     STATS_DECLTRACK_CS_ATTR(potential_values)
7943   }
7944 };
7945 
7946 struct AAPotentialValuesCallSiteReturned
7947     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7948   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7949       : AACallSiteReturnedFromReturned<AAPotentialValues,
7950                                        AAPotentialValuesImpl>(IRP, A) {}
7951 
7952   /// See AbstractAttribute::trackStatistics()
7953   void trackStatistics() const override {
7954     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7955   }
7956 };
7957 
7958 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7959   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7960       : AAPotentialValuesFloating(IRP, A) {}
7961 
7962   /// See AbstractAttribute::initialize(..).
7963   void initialize(Attributor &A) override {
7964     Value &V = getAssociatedValue();
7965 
7966     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7967       unionAssumed(C->getValue());
7968       indicateOptimisticFixpoint();
7969       return;
7970     }
7971 
7972     if (isa<UndefValue>(&V)) {
7973       unionAssumedWithUndef();
7974       indicateOptimisticFixpoint();
7975       return;
7976     }
7977   }
7978 
7979   /// See AbstractAttribute::updateImpl(...).
7980   ChangeStatus updateImpl(Attributor &A) override {
7981     Value &V = getAssociatedValue();
7982     auto AssumedBefore = getAssumed();
7983     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
7984                                              DepClassTy::REQUIRED);
7985     const auto &S = AA.getAssumed();
7986     unionAssumed(S);
7987     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7988                                          : ChangeStatus::CHANGED;
7989   }
7990 
7991   /// See AbstractAttribute::trackStatistics()
7992   void trackStatistics() const override {
7993     STATS_DECLTRACK_CSARG_ATTR(potential_values)
7994   }
7995 };
7996 
7997 /// ------------------------ NoUndef Attribute ---------------------------------
7998 struct AANoUndefImpl : AANoUndef {
7999   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
8000 
8001   /// See AbstractAttribute::initialize(...).
8002   void initialize(Attributor &A) override {
8003     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
8004       indicateOptimisticFixpoint();
8005       return;
8006     }
8007     Value &V = getAssociatedValue();
8008     if (isa<UndefValue>(V))
8009       indicatePessimisticFixpoint();
8010     else if (isa<FreezeInst>(V))
8011       indicateOptimisticFixpoint();
8012     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
8013              isGuaranteedNotToBeUndefOrPoison(&V))
8014       indicateOptimisticFixpoint();
8015     else
8016       AANoUndef::initialize(A);
8017   }
8018 
8019   /// See followUsesInMBEC
8020   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
8021                        AANoUndef::StateType &State) {
8022     const Value *UseV = U->get();
8023     const DominatorTree *DT = nullptr;
8024     AssumptionCache *AC = nullptr;
8025     InformationCache &InfoCache = A.getInfoCache();
8026     if (Function *F = getAnchorScope()) {
8027       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
8028       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
8029     }
8030     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
8031     bool TrackUse = false;
8032     // Track use for instructions which must produce undef or poison bits when
8033     // at least one operand contains such bits.
8034     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
8035       TrackUse = true;
8036     return TrackUse;
8037   }
8038 
8039   /// See AbstractAttribute::getAsStr().
8040   const std::string getAsStr() const override {
8041     return getAssumed() ? "noundef" : "may-undef-or-poison";
8042   }
8043 
8044   ChangeStatus manifest(Attributor &A) override {
8045     // We don't manifest noundef attribute for dead positions because the
8046     // associated values with dead positions would be replaced with undef
8047     // values.
8048     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
8049       return ChangeStatus::UNCHANGED;
8050     // A position whose simplified value does not have any value is
8051     // considered to be dead. We don't manifest noundef in such positions for
8052     // the same reason above.
8053     auto &ValueSimplifyAA =
8054         A.getAAFor<AAValueSimplify>(*this, getIRPosition(), DepClassTy::NONE);
8055     if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
8056       return ChangeStatus::UNCHANGED;
8057     return AANoUndef::manifest(A);
8058   }
8059 };
8060 
8061 struct AANoUndefFloating : public AANoUndefImpl {
8062   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
8063       : AANoUndefImpl(IRP, A) {}
8064 
8065   /// See AbstractAttribute::initialize(...).
8066   void initialize(Attributor &A) override {
8067     AANoUndefImpl::initialize(A);
8068     if (!getState().isAtFixpoint())
8069       if (Instruction *CtxI = getCtxI())
8070         followUsesInMBEC(*this, A, getState(), *CtxI);
8071   }
8072 
8073   /// See AbstractAttribute::updateImpl(...).
8074   ChangeStatus updateImpl(Attributor &A) override {
8075     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8076                             AANoUndef::StateType &T, bool Stripped) -> bool {
8077       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
8078                                              DepClassTy::REQUIRED);
8079       if (!Stripped && this == &AA) {
8080         T.indicatePessimisticFixpoint();
8081       } else {
8082         const AANoUndef::StateType &S =
8083             static_cast<const AANoUndef::StateType &>(AA.getState());
8084         T ^= S;
8085       }
8086       return T.isValidState();
8087     };
8088 
8089     StateType T;
8090     if (!genericValueTraversal<AANoUndef, StateType>(
8091             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
8092       return indicatePessimisticFixpoint();
8093 
8094     return clampStateAndIndicateChange(getState(), T);
8095   }
8096 
8097   /// See AbstractAttribute::trackStatistics()
8098   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8099 };
8100 
8101 struct AANoUndefReturned final
8102     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
8103   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
8104       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
8105 
8106   /// See AbstractAttribute::trackStatistics()
8107   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8108 };
8109 
8110 struct AANoUndefArgument final
8111     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
8112   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
8113       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
8114 
8115   /// See AbstractAttribute::trackStatistics()
8116   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
8117 };
8118 
8119 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
8120   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
8121       : AANoUndefFloating(IRP, A) {}
8122 
8123   /// See AbstractAttribute::trackStatistics()
8124   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
8125 };
8126 
8127 struct AANoUndefCallSiteReturned final
8128     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
8129   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
8130       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
8131 
8132   /// See AbstractAttribute::trackStatistics()
8133   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
8134 };
8135 } // namespace
8136 
8137 const char AAReturnedValues::ID = 0;
8138 const char AANoUnwind::ID = 0;
8139 const char AANoSync::ID = 0;
8140 const char AANoFree::ID = 0;
8141 const char AANonNull::ID = 0;
8142 const char AANoRecurse::ID = 0;
8143 const char AAWillReturn::ID = 0;
8144 const char AAUndefinedBehavior::ID = 0;
8145 const char AANoAlias::ID = 0;
8146 const char AAReachability::ID = 0;
8147 const char AANoReturn::ID = 0;
8148 const char AAIsDead::ID = 0;
8149 const char AADereferenceable::ID = 0;
8150 const char AAAlign::ID = 0;
8151 const char AANoCapture::ID = 0;
8152 const char AAValueSimplify::ID = 0;
8153 const char AAHeapToStack::ID = 0;
8154 const char AAPrivatizablePtr::ID = 0;
8155 const char AAMemoryBehavior::ID = 0;
8156 const char AAMemoryLocation::ID = 0;
8157 const char AAValueConstantRange::ID = 0;
8158 const char AAPotentialValues::ID = 0;
8159 const char AANoUndef::ID = 0;
8160 
8161 // Macro magic to create the static generator function for attributes that
8162 // follow the naming scheme.
8163 
8164 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
8165   case IRPosition::PK:                                                         \
8166     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
8167 
8168 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
8169   case IRPosition::PK:                                                         \
8170     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
8171     ++NumAAs;                                                                  \
8172     break;
8173 
8174 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
8175   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8176     CLASS *AA = nullptr;                                                       \
8177     switch (IRP.getPositionKind()) {                                           \
8178       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8179       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8180       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8181       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8182       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8183       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8184       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8185       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8186     }                                                                          \
8187     return *AA;                                                                \
8188   }
8189 
8190 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
8191   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8192     CLASS *AA = nullptr;                                                       \
8193     switch (IRP.getPositionKind()) {                                           \
8194       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8195       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
8196       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8197       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8198       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8199       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8200       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8201       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8202     }                                                                          \
8203     return *AA;                                                                \
8204   }
8205 
8206 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
8207   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8208     CLASS *AA = nullptr;                                                       \
8209     switch (IRP.getPositionKind()) {                                           \
8210       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8211       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8212       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8213       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8214       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8215       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8216       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8217       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8218     }                                                                          \
8219     return *AA;                                                                \
8220   }
8221 
8222 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
8223   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8224     CLASS *AA = nullptr;                                                       \
8225     switch (IRP.getPositionKind()) {                                           \
8226       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8227       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8228       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8229       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8230       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8231       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8232       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8233       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8234     }                                                                          \
8235     return *AA;                                                                \
8236   }
8237 
8238 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
8239   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8240     CLASS *AA = nullptr;                                                       \
8241     switch (IRP.getPositionKind()) {                                           \
8242       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8243       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8244       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8245       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8246       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8247       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8248       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8249       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8250     }                                                                          \
8251     return *AA;                                                                \
8252   }
8253 
8254 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
8255 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
8256 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
8257 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
8258 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
8259 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
8260 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
8261 
8262 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
8263 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
8264 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
8265 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
8266 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
8267 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
8268 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
8269 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
8270 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
8271 
8272 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
8273 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
8274 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
8275 
8276 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
8277 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
8278 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
8279 
8280 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
8281 
8282 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
8283 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
8284 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
8285 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
8286 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
8287 #undef SWITCH_PK_CREATE
8288 #undef SWITCH_PK_INV
8289