1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 
36 #include <cassert>
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "attributor"
41 
42 static cl::opt<bool> ManifestInternal(
43     "attributor-manifest-internal", cl::Hidden,
44     cl::desc("Manifest Attributor internal string attributes."),
45     cl::init(false));
46 
47 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
48                                        cl::Hidden);
49 
50 template <>
51 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
52 
53 static cl::opt<unsigned, true> MaxPotentialValues(
54     "attributor-max-potential-values", cl::Hidden,
55     cl::desc("Maximum number of potential values to be "
56              "tracked for each position."),
57     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
58     cl::init(7));
59 
60 STATISTIC(NumAAs, "Number of abstract attributes created");
61 
62 // Some helper macros to deal with statistics tracking.
63 //
64 // Usage:
65 // For simple IR attribute tracking overload trackStatistics in the abstract
66 // attribute and choose the right STATS_DECLTRACK_********* macro,
67 // e.g.,:
68 //  void trackStatistics() const override {
69 //    STATS_DECLTRACK_ARG_ATTR(returned)
70 //  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
74 //
75 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
76   ("Number of " #TYPE " marked '" #NAME "'")
77 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
78 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
79 #define STATS_DECL(NAME, TYPE, MSG)                                            \
80   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
81 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
82 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
83   {                                                                            \
84     STATS_DECL(NAME, TYPE, MSG)                                                \
85     STATS_TRACK(NAME, TYPE)                                                    \
86   }
87 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
88   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
89 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
90   STATS_DECLTRACK(NAME, CSArguments,                                           \
91                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
92 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
94 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
95   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
96 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
97   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
98                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
99 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
100   STATS_DECLTRACK(NAME, CSReturn,                                              \
101                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
102 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
103   STATS_DECLTRACK(NAME, Floating,                                              \
104                   ("Number of floating values known to be '" #NAME "'"))
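
// For illustration, STATS_DECLTRACK_FN_ATTR(nounwind) expands (roughly) to:
//
//   { STATISTIC(NumIRFunction_nounwind,
//               "Number of functions marked 'nounwind'");
//     ++(NumIRFunction_nounwind); }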
105 
// Specialization of the operator<< for abstract attribute subclasses. This
107 // disambiguates situations where multiple operators are applicable.
108 namespace llvm {
109 #define PIPE_OPERATOR(CLASS)                                                   \
110   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
111     return OS << static_cast<const AbstractAttribute &>(AA);                   \
112   }
113 
114 PIPE_OPERATOR(AAIsDead)
115 PIPE_OPERATOR(AANoUnwind)
116 PIPE_OPERATOR(AANoSync)
117 PIPE_OPERATOR(AANoRecurse)
118 PIPE_OPERATOR(AAWillReturn)
119 PIPE_OPERATOR(AANoReturn)
120 PIPE_OPERATOR(AAReturnedValues)
121 PIPE_OPERATOR(AANonNull)
122 PIPE_OPERATOR(AANoAlias)
123 PIPE_OPERATOR(AADereferenceable)
124 PIPE_OPERATOR(AAAlign)
125 PIPE_OPERATOR(AANoCapture)
126 PIPE_OPERATOR(AAValueSimplify)
127 PIPE_OPERATOR(AANoFree)
128 PIPE_OPERATOR(AAHeapToStack)
129 PIPE_OPERATOR(AAReachability)
130 PIPE_OPERATOR(AAMemoryBehavior)
131 PIPE_OPERATOR(AAMemoryLocation)
132 PIPE_OPERATOR(AAValueConstantRange)
133 PIPE_OPERATOR(AAPrivatizablePtr)
134 PIPE_OPERATOR(AAUndefinedBehavior)
135 PIPE_OPERATOR(AAPotentialValues)
136 PIPE_OPERATOR(AANoUndef)
137 
138 #undef PIPE_OPERATOR
139 } // namespace llvm
140 
141 namespace {
142 
/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is false and the instruction is volatile, return nullptr.
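/// For example, for "store i32 0, i32* %p" the result is %p, while for a
/// volatile load it is nullptr unless \p AllowVolatile is set.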
146 static const Value *getPointerOperand(const Instruction *I,
147                                       bool AllowVolatile) {
148   if (!AllowVolatile && I->isVolatile())
149     return nullptr;
150 
  if (auto *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperand();

  if (auto *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I))
    return CXI->getPointerOperand();

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I))
    return RMWI->getPointerOperand();
166 
167   return nullptr;
168 }
169 
170 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
171 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
173 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
174 /// through a cast to i8*.
175 ///
/// TODO: This could probably live somewhere more prominently if it doesn't
177 ///       already exist.
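///
/// A usage sketch (the types and value names are hypothetical): for %ptr of
/// type %pair = type { i32, i32 } and Offset == 4, the call
///   constructPointer(Int32PtrTy, PairTy, Ptr, /* Offset */ 4, IRB, DL)
/// emits
///   %ptr.0.1 = getelementptr %pair, %pair* %ptr, i32 0, i32 1
/// with no byte-wise adjustment, followed by a cast only if the GEP type does
/// not already match \p ResTy.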
178 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
179                                int64_t Offset, IRBuilder<NoFolder> &IRB,
180                                const DataLayout &DL) {
181   assert(Offset >= 0 && "Negative offset not supported yet!");
182   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
183                     << "-bytes as " << *ResTy << "\n");
184 
185   if (Offset) {
186     SmallVector<Value *, 4> Indices;
187     std::string GEPName = Ptr->getName().str() + ".0";
188 
189     // Add 0 index to look through the pointer.
190     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
191            "Offset out of bounds");
192     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
193 
194     Type *Ty = PtrElemTy;
195     do {
196       auto *STy = dyn_cast<StructType>(Ty);
197       if (!STy)
198         // Non-aggregate type, we cast and make byte-wise progress now.
199         break;
200 
201       const StructLayout *SL = DL.getStructLayout(STy);
202       if (int64_t(SL->getSizeInBytes()) < Offset)
203         break;
204 
205       uint64_t Idx = SL->getElementContainingOffset(Offset);
206       assert(Idx < STy->getNumElements() && "Offset calculation error!");
207       uint64_t Rem = Offset - SL->getElementOffset(Idx);
208       Ty = STy->getElementType(Idx);
209 
210       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
211                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
212 
213       GEPName += "." + std::to_string(Idx);
214       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
215       Offset = Rem;
216     } while (Offset);
217 
218     // Create a GEP for the indices collected above.
219     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
220 
221     // If an offset is left we use byte-wise adjustment.
222     if (Offset) {
223       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
224       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
225                           GEPName + ".b" + Twine(Offset));
226     }
227   }
228 
229   // Ensure the result has the requested type.
230   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
231 
232   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
233   return Ptr;
234 }
235 
236 /// Recursively visit all values that might become \p IRP at some point. This
237 /// will be done by looking through cast instructions, selects, phis, and calls
238 /// with the "returned" attribute. Once we cannot look through the value any
239 /// further, the callback \p VisitValueCB is invoked and passed the current
240 /// value, the \p State, and a flag to indicate if we stripped anything.
241 /// Stripped means that we unpacked the value associated with \p IRP at least
242 /// once. Note that the value used for the callback may still be the value
243 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
244 /// we will never visit more values than specified by \p MaxValues.
245 template <typename AAType, typename StateTy>
246 static bool genericValueTraversal(
247     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
248     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
249         VisitValueCB,
250     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
251     function_ref<Value *(Value *)> StripCB = nullptr) {
252 
253   const AAIsDead *LivenessAA = nullptr;
254   if (IRP.getAnchorScope())
255     LivenessAA = &A.getAAFor<AAIsDead>(
256         QueryingAA,
257         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
258         DepClassTy::NONE);
259   bool AnyDead = false;
260 
261   using Item = std::pair<Value *, const Instruction *>;
262   SmallSet<Item, 16> Visited;
263   SmallVector<Item, 16> Worklist;
264   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
265 
266   int Iteration = 0;
267   do {
268     Item I = Worklist.pop_back_val();
269     Value *V = I.first;
270     CtxI = I.second;
271     if (StripCB)
272       V = StripCB(V);
273 
    // Check if we should process the current value. To prevent endless
    // recursion, keep a record of the values we have followed!
276     if (!Visited.insert(I).second)
277       continue;
278 
279     // Make sure we limit the compile time for complex expressions.
280     if (Iteration++ >= MaxValues)
281       return false;
282 
    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer, as stripPointerCasts only works on pointers.
285     Value *NewV = nullptr;
286     if (V->getType()->isPointerTy()) {
287       NewV = V->stripPointerCasts();
288     } else {
289       auto *CB = dyn_cast<CallBase>(V);
290       if (CB && CB->getCalledFunction()) {
291         for (Argument &Arg : CB->getCalledFunction()->args())
292           if (Arg.hasReturnedAttr()) {
293             NewV = CB->getArgOperand(Arg.getArgNo());
294             break;
295           }
296       }
297     }
298     if (NewV && NewV != V) {
299       Worklist.push_back({NewV, CtxI});
300       continue;
301     }
302 
303     // Look through select instructions, visit both potential values.
304     if (auto *SI = dyn_cast<SelectInst>(V)) {
305       Worklist.push_back({SI->getTrueValue(), CtxI});
306       Worklist.push_back({SI->getFalseValue(), CtxI});
307       continue;
308     }
309 
310     // Look through phi nodes, visit all live operands.
311     if (auto *PHI = dyn_cast<PHINode>(V)) {
312       assert(LivenessAA &&
313              "Expected liveness in the presence of instructions!");
314       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
315         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
316         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
317                             LivenessAA,
318                             /* CheckBBLivenessOnly */ true)) {
319           AnyDead = true;
320           continue;
321         }
322         Worklist.push_back(
323             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
324       }
325       continue;
326     }
327 
328     if (UseValueSimplify && !isa<Constant>(V)) {
329       bool UsedAssumedInformation = false;
330       Optional<Constant *> C =
331           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
332       if (!C.hasValue())
333         continue;
334       if (Value *NewV = C.getValue()) {
335         Worklist.push_back({NewV, CtxI});
336         continue;
337       }
338     }
339 
340     // Once a leaf is reached we inform the user through the callback.
341     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
342       return false;
343   } while (!Worklist.empty());
344 
  // If we actually used liveness information, we have to record a dependence.
346   if (AnyDead)
347     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
348 
349   // All values have been visited.
350   return true;
351 }
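
// A minimal invocation sketch (the chosen AA type, state, and callback body
// are hypothetical):
//
//   auto VisitValueCB = [](Value &V, const Instruction *CtxI,
//                          BooleanState &S, bool Stripped) -> bool {
//     // Inspect the leaf value V here; returning false aborts the traversal.
//     return true;
//   };
//   genericValueTraversal<AANonNull, BooleanState>(
//       A, getIRPosition(), *this, S, VisitValueCB, getCtxI());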
352 
353 const Value *stripAndAccumulateMinimalOffsets(
354     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
355     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
356     bool UseAssumed = false) {
357 
358   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
359     const IRPosition &Pos = IRPosition::value(V);
360     // Only track dependence if we are going to use the assumed info.
361     const AAValueConstantRange &ValueConstantRangeAA =
362         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
363                                          UseAssumed ? DepClassTy::OPTIONAL
364                                                     : DepClassTy::NONE);
365     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
366                                      : ValueConstantRangeAA.getKnown();
367     // We can only use the lower part of the range because the upper part can
368     // be higher than what the value can really be.
369     ROffset = Range.getSignedMin();
370     return true;
371   };
372 
373   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
374                                                 AttributorAnalysis);
375 }
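
// For example (a sketch): for "getelementptr inbounds i32, i32* %base, i64 %i"
// where the range of %i is known to be [1, 8), the callback reports the signed
// minimum 1, so 4 bytes (1 * sizeof(i32)) are accumulated and %base is
// returned.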
376 
377 static const Value *getMinimalBaseOfAccsesPointerOperand(
378     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
379     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
380   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
381   if (!Ptr)
382     return nullptr;
383   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
384   const Value *Base = stripAndAccumulateMinimalOffsets(
385       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
386 
387   BytesOffset = OffsetAPInt.getSExtValue();
388   return Base;
389 }
390 
391 static const Value *
392 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
393                                      const DataLayout &DL,
394                                      bool AllowNonInbounds = false) {
395   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
396   if (!Ptr)
397     return nullptr;
398 
399   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
400                                           AllowNonInbounds);
401 }
402 
403 /// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, the
/// update is required to be run again).
406 template <typename StateType>
407 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
408   auto Assumed = S.getAssumed();
409   S ^= R;
410   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
411                                    : ChangeStatus::CHANGED;
412 }
413 
414 /// Clamp the information known for all returned values of a function
415 /// (identified by \p QueryingAA) into \p S.
416 template <typename AAType, typename StateType = typename AAType::StateType>
417 static void clampReturnedValueStates(
418     Attributor &A, const AAType &QueryingAA, StateType &S,
419     const IRPosition::CallBaseContext *CBContext = nullptr) {
420   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
421                     << QueryingAA << " into " << S << "\n");
422 
423   assert((QueryingAA.getIRPosition().getPositionKind() ==
424               IRPosition::IRP_RETURNED ||
425           QueryingAA.getIRPosition().getPositionKind() ==
426               IRPosition::IRP_CALL_SITE_RETURNED) &&
427          "Can only clamp returned value states for a function returned or call "
428          "site returned position!");
429 
  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all that exist.
432   Optional<StateType> T;
433 
434   // Callback for each possibly returned value.
435   auto CheckReturnValue = [&](Value &RV) -> bool {
436     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
437     const AAType &AA =
438         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
439     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
440                       << " @ " << RVPos << "\n");
441     const StateType &AAS = AA.getState();
442     if (T.hasValue())
443       *T &= AAS;
444     else
445       T = AAS;
446     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
447                       << "\n");
448     return T->isValidState();
449   };
450 
451   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
452     S.indicatePessimisticFixpoint();
453   else if (T.hasValue())
454     S ^= *T;
455 }
456 
457 /// Helper class for generic deduction: return value -> returned position.
458 template <typename AAType, typename BaseType,
459           typename StateType = typename BaseType::StateType,
460           bool PropagateCallBaseContext = false>
461 struct AAReturnedFromReturnedValues : public BaseType {
462   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
463       : BaseType(IRP, A) {}
464 
465   /// See AbstractAttribute::updateImpl(...).
466   ChangeStatus updateImpl(Attributor &A) override {
467     StateType S(StateType::getBestState(this->getState()));
468     clampReturnedValueStates<AAType, StateType>(
469         A, *this, S,
470         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
473     return clampStateAndIndicateChange<StateType>(this->getState(), S);
474   }
475 };
476 
477 /// Clamp the information known at all call sites for a given argument
478 /// (identified by \p QueryingAA) into \p S.
479 template <typename AAType, typename StateType = typename AAType::StateType>
480 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
481                                         StateType &S) {
482   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
483                     << QueryingAA << " into " << S << "\n");
484 
485   assert(QueryingAA.getIRPosition().getPositionKind() ==
486              IRPosition::IRP_ARGUMENT &&
487          "Can only clamp call site argument states for an argument position!");
488 
  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all that exist.
491   Optional<StateType> T;
492 
493   // The argument number which is also the call site argument number.
494   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
495 
496   auto CallSiteCheck = [&](AbstractCallSite ACS) {
497     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
500     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
501       return false;
502 
503     const AAType &AA =
504         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
505     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
506                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
507     const StateType &AAS = AA.getState();
508     if (T.hasValue())
509       *T &= AAS;
510     else
511       T = AAS;
512     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
513                       << "\n");
514     return T->isValidState();
515   };
516 
517   bool AllCallSitesKnown;
518   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
519                               AllCallSitesKnown))
520     S.indicatePessimisticFixpoint();
521   else if (T.hasValue())
522     S ^= *T;
523 }
524 
525 /// This function is the bridge between argument position and the call base
526 /// context.
527 template <typename AAType, typename BaseType,
528           typename StateType = typename AAType::StateType>
529 bool getArgumentStateFromCallBaseContext(Attributor &A,
530                                          BaseType &QueryingAttribute,
531                                          IRPosition &Pos, StateType &State) {
532   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
         "Expected an 'argument' position!");
534   const CallBase *CBContext = Pos.getCallBaseContext();
535   if (!CBContext)
536     return false;
537 
538   int ArgNo = Pos.getCallSiteArgNo();
539   assert(ArgNo >= 0 && "Invalid Arg No!");
540 
541   const auto &AA = A.getAAFor<AAType>(
542       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
543       DepClassTy::REQUIRED);
544   const StateType &CBArgumentState =
545       static_cast<const StateType &>(AA.getState());
546 
  LLVM_DEBUG(dbgs() << "[Attributor] Bridging call site context to argument. "
                    << "Position: " << Pos
                    << ", CB Arg state: " << CBArgumentState << "\n");
550 
551   // NOTE: If we want to do call site grouping it should happen here.
552   State ^= CBArgumentState;
553   return true;
554 }
555 
556 /// Helper class for generic deduction: call site argument -> argument position.
557 template <typename AAType, typename BaseType,
558           typename StateType = typename AAType::StateType,
559           bool BridgeCallBaseContext = false>
560 struct AAArgumentFromCallSiteArguments : public BaseType {
561   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
562       : BaseType(IRP, A) {}
563 
564   /// See AbstractAttribute::updateImpl(...).
565   ChangeStatus updateImpl(Attributor &A) override {
566     StateType S = StateType::getBestState(this->getState());
567 
568     if (BridgeCallBaseContext) {
569       bool Success =
570           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
571               A, *this, this->getIRPosition(), S);
572       if (Success)
573         return clampStateAndIndicateChange<StateType>(this->getState(), S);
574     }
575     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
576 
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
579     return clampStateAndIndicateChange<StateType>(this->getState(), S);
580   }
581 };
582 
583 /// Helper class for generic replication: function returned -> cs returned.
584 template <typename AAType, typename BaseType,
585           typename StateType = typename BaseType::StateType,
586           bool IntroduceCallBaseContext = false>
587 struct AACallSiteReturnedFromReturned : public BaseType {
588   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
589       : BaseType(IRP, A) {}
590 
591   /// See AbstractAttribute::updateImpl(...).
592   ChangeStatus updateImpl(Attributor &A) override {
593     assert(this->getIRPosition().getPositionKind() ==
594                IRPosition::IRP_CALL_SITE_RETURNED &&
595            "Can only wrap function returned positions for call site returned "
596            "positions!");
597     auto &S = this->getState();
598 
599     const Function *AssociatedFunction =
600         this->getIRPosition().getAssociatedFunction();
601     if (!AssociatedFunction)
602       return S.indicatePessimisticFixpoint();
603 
604     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
605     if (IntroduceCallBaseContext)
606       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
607                         << CBContext << "\n");
608 
609     IRPosition FnPos = IRPosition::returned(
610         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
611     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
612     return clampStateAndIndicateChange(S, AA.getState());
613   }
614 };
615 
616 /// Helper function to accumulate uses.
617 template <class AAType, typename StateType = typename AAType::StateType>
618 static void followUsesInContext(AAType &AA, Attributor &A,
619                                 MustBeExecutedContextExplorer &Explorer,
620                                 const Instruction *CtxI,
621                                 SetVector<const Use *> &Uses,
622                                 StateType &State) {
623   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
624   for (unsigned u = 0; u < Uses.size(); ++u) {
625     const Use *U = Uses[u];
626     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
627       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
628       if (Found && AA.followUseInMBEC(A, U, UserI, State))
629         for (const Use &Us : UserI->uses())
630           Uses.insert(&Us);
631     }
632   }
633 }
634 
635 /// Use the must-be-executed-context around \p I to add information into \p S.
636 /// The AAType class is required to have `followUseInMBEC` method with the
637 /// following signature and behaviour:
638 ///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - The underlying use.
/// I - The user of \p U.
642 /// Returns true if the value should be tracked transitively.
643 ///
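/// A hypothetical implementation (the state operation used here is purely
/// illustrative) could look like:
///
///   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                        StateType &State) {
///     // A non-volatile load dereferences its pointer operand here.
///     if (auto *LI = dyn_cast<LoadInst>(I))
///       if (!LI->isVolatile() && U->get() == LI->getPointerOperand())
///         State.indicateOptimisticFixpoint();
///     return true; // Follow the uses of I transitively.
///   }
///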
644 template <class AAType, typename StateType = typename AAType::StateType>
645 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
646                              Instruction &CtxI) {
647 
648   // Container for (transitive) uses of the associated value.
649   SetVector<const Use *> Uses;
650   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
651     Uses.insert(&U);
652 
653   MustBeExecutedContextExplorer &Explorer =
654       A.getInfoCache().getMustBeExecutedContextExplorer();
655 
656   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
657 
658   if (S.isAtFixpoint())
659     return;
660 
661   SmallVector<const BranchInst *, 4> BrInsts;
662   auto Pred = [&](const Instruction *I) {
663     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
664       if (Br->isConditional())
665         BrInsts.push_back(Br);
666     return true;
667   };
668 
  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can then be merged into the current state. Let ParentState_i
  // be the state that captures the known information for the i-th branch
  // instruction in the context. A ChildState is created for each of its
  // successors.
674   //
675   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
676   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
677   //      ...
678   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
679   //
680   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
681   //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //    if (a)
687   //      if (b) {
688   //        *ptr = 0;
689   //      } else {
690   //        *ptr = 1;
691   //      }
692   //    else {
693   //      if (b) {
694   //        *ptr = 0;
695   //      } else {
696   //        *ptr = 1;
697   //      }
698   //    }
699   // }
700 
701   Explorer.checkForAllContext(&CtxI, Pred);
702   for (const BranchInst *Br : BrInsts) {
703     StateType ParentState;
704 
705     // The known state of the parent state is a conjunction of children's
706     // known states so it is initialized with a best state.
707     ParentState.indicateOptimisticFixpoint();
708 
709     for (const BasicBlock *BB : Br->successors()) {
710       StateType ChildState;
711 
712       size_t BeforeSize = Uses.size();
713       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
714 
715       // Erase uses which only appear in the child.
716       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
717         It = Uses.erase(It);
718 
719       ParentState &= ChildState;
720     }
721 
722     // Use only known state.
723     S += ParentState;
724   }
725 }
726 
727 /// -----------------------NoUnwind Function Attribute--------------------------
728 
729 struct AANoUnwindImpl : AANoUnwind {
730   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
731 
732   const std::string getAsStr() const override {
733     return getAssumed() ? "nounwind" : "may-unwind";
734   }
735 
736   /// See AbstractAttribute::updateImpl(...).
737   ChangeStatus updateImpl(Attributor &A) override {
738     auto Opcodes = {
739         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
740         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
741         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
742 
743     auto CheckForNoUnwind = [&](Instruction &I) {
744       if (!I.mayThrow())
745         return true;
746 
747       if (const auto *CB = dyn_cast<CallBase>(&I)) {
748         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
749             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
750         return NoUnwindAA.isAssumedNoUnwind();
751       }
752       return false;
753     };
754 
755     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
756       return indicatePessimisticFixpoint();
757 
758     return ChangeStatus::UNCHANGED;
759   }
760 };
761 
762 struct AANoUnwindFunction final : public AANoUnwindImpl {
763   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
764       : AANoUnwindImpl(IRP, A) {}
765 
766   /// See AbstractAttribute::trackStatistics()
767   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
768 };
769 
/// NoUnwind attribute deduction for a call site.
771 struct AANoUnwindCallSite final : AANoUnwindImpl {
772   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
773       : AANoUnwindImpl(IRP, A) {}
774 
775   /// See AbstractAttribute::initialize(...).
776   void initialize(Attributor &A) override {
777     AANoUnwindImpl::initialize(A);
778     Function *F = getAssociatedFunction();
779     if (!F || F->isDeclaration())
780       indicatePessimisticFixpoint();
781   }
782 
783   /// See AbstractAttribute::updateImpl(...).
784   ChangeStatus updateImpl(Attributor &A) override {
785     // TODO: Once we have call site specific value information we can provide
786     //       call site specific liveness information and then it makes
787     //       sense to specialize attributes for call sites arguments instead of
788     //       redirecting requests to the callee argument.
789     Function *F = getAssociatedFunction();
790     const IRPosition &FnPos = IRPosition::function(*F);
791     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
792     return clampStateAndIndicateChange(getState(), FnAA.getState());
793   }
794 
795   /// See AbstractAttribute::trackStatistics()
796   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
797 };
798 
799 /// --------------------- Function Return Values -------------------------------
800 
801 /// "Attribute" that collects all potential returned values and the return
802 /// instructions that they arise from.
803 ///
804 /// If there is a unique returned value R, the manifest method will:
805 ///   - mark R with the "returned" attribute, if R is an argument.
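///
/// For example (a sketch), manifesting on
///   define i32* @f(i32* %p) { ret i32* %p }
/// yields
///   define i32* @f(i32* returned %p) { ret i32* %p }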
806 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
807 
808   /// Mapping of values potentially returned by the associated function to the
809   /// return instructions that might return them.
810   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
811 
812   /// Mapping to remember the number of returned values for a call site such
813   /// that we can avoid updates if nothing changed.
814   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
815 
816   /// Set of unresolved calls returned by the associated function.
817   SmallSetVector<CallBase *, 4> UnresolvedCalls;
818 
819   /// State flags
820   ///
821   ///{
822   bool IsFixed = false;
823   bool IsValidState = true;
824   ///}
825 
826 public:
827   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
828       : AAReturnedValues(IRP, A) {}
829 
830   /// See AbstractAttribute::initialize(...).
831   void initialize(Attributor &A) override {
832     // Reset the state.
833     IsFixed = false;
834     IsValidState = true;
835     ReturnedValues.clear();
836 
837     Function *F = getAssociatedFunction();
838     if (!F || F->isDeclaration()) {
839       indicatePessimisticFixpoint();
840       return;
841     }
842     assert(!F->getReturnType()->isVoidTy() &&
843            "Did not expect a void return type!");
844 
845     // The map from instruction opcodes to those instructions in the function.
846     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
847 
    // Look through all arguments; if one is marked as returned, we are done.
849     for (Argument &Arg : F->args()) {
850       if (Arg.hasReturnedAttr()) {
851         auto &ReturnInstSet = ReturnedValues[&Arg];
852         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
853           for (Instruction *RI : *Insts)
854             ReturnInstSet.insert(cast<ReturnInst>(RI));
855 
856         indicateOptimisticFixpoint();
857         return;
858       }
859     }
860 
861     if (!A.isFunctionIPOAmendable(*F))
862       indicatePessimisticFixpoint();
863   }
864 
865   /// See AbstractAttribute::manifest(...).
866   ChangeStatus manifest(Attributor &A) override;
867 
868   /// See AbstractAttribute::getState(...).
869   AbstractState &getState() override { return *this; }
870 
871   /// See AbstractAttribute::getState(...).
872   const AbstractState &getState() const override { return *this; }
873 
874   /// See AbstractAttribute::updateImpl(Attributor &A).
875   ChangeStatus updateImpl(Attributor &A) override;
876 
877   llvm::iterator_range<iterator> returned_values() override {
878     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
879   }
880 
881   llvm::iterator_range<const_iterator> returned_values() const override {
882     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
883   }
884 
885   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
886     return UnresolvedCalls;
887   }
888 
889   /// Return the number of potential return values, -1 if unknown.
890   size_t getNumReturnValues() const override {
891     return isValidState() ? ReturnedValues.size() : -1;
892   }
893 
894   /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return nullptr. If it is not clear yet, return
  /// None.
897   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
898 
899   /// See AbstractState::checkForAllReturnedValues(...).
900   bool checkForAllReturnedValuesAndReturnInsts(
901       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
902       const override;
903 
904   /// Pretty print the attribute similar to the IR representation.
905   const std::string getAsStr() const override;
906 
907   /// See AbstractState::isAtFixpoint().
908   bool isAtFixpoint() const override { return IsFixed; }
909 
910   /// See AbstractState::isValidState().
911   bool isValidState() const override { return IsValidState; }
912 
913   /// See AbstractState::indicateOptimisticFixpoint(...).
914   ChangeStatus indicateOptimisticFixpoint() override {
915     IsFixed = true;
916     return ChangeStatus::UNCHANGED;
917   }
918 
919   ChangeStatus indicatePessimisticFixpoint() override {
920     IsFixed = true;
921     IsValidState = false;
922     return ChangeStatus::CHANGED;
923   }
924 };
925 
926 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
927   ChangeStatus Changed = ChangeStatus::UNCHANGED;
928 
929   // Bookkeeping.
930   assert(isValidState());
931   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");
933 
934   // Check if we have an assumed unique return value that we could manifest.
935   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
936 
937   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
938     return Changed;
939 
940   // Bookkeeping.
941   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with a unique return value");
943 
944   // Callback to replace the uses of CB with the constant C.
945   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
946     if (CB.use_empty())
947       return ChangeStatus::UNCHANGED;
948     if (A.changeValueAfterManifest(CB, C))
949       return ChangeStatus::CHANGED;
950     return ChangeStatus::UNCHANGED;
951   };
952 
953   // If the assumed unique return value is an argument, annotate it.
954   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
955     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
956             getAssociatedFunction()->getReturnType())) {
957       getIRPosition() = IRPosition::argument(*UniqueRVArg);
958       Changed = IRAttribute::manifest(A);
959     }
960   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
961     // We can replace the returned value with the unique returned constant.
962     Value &AnchorValue = getAnchorValue();
963     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
964       for (const Use &U : F->uses())
965         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
966           if (CB->isCallee(&U)) {
967             Constant *RVCCast =
968                 CB->getType() == RVC->getType()
969                     ? RVC
970                     : ConstantExpr::getPointerCast(RVC, CB->getType());
971             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
972           }
973     } else {
974       assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
976       Constant *RVCCast =
977           AnchorValue.getType() == RVC->getType()
978               ? RVC
979               : ConstantExpr::getPointerCast(RVC, AnchorValue.getType());
980       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
981     }
982     if (Changed == ChangeStatus::CHANGED)
983       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
984                       "Number of function returns replaced by constant return");
985   }
986 
987   return Changed;
988 }
989 
990 const std::string AAReturnedValuesImpl::getAsStr() const {
991   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
992          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
993          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
994 }
995 
996 Optional<Value *>
997 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
998   // If checkForAllReturnedValues provides a unique value, ignoring potential
999   // undef values that can also be present, it is assumed to be the actual
1000   // return value and forwarded to the caller of this method. If there are
1001   // multiple, a nullptr is returned indicating there cannot be a unique
1002   // returned value.
1003   Optional<Value *> UniqueRV;
1004 
1005   auto Pred = [&](Value &RV) -> bool {
1006     // If we found a second returned value and neither the current nor the saved
1007     // one is an undef, there is no unique returned value. Undefs are special
1008     // since we can pretend they have any value.
1009     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1010         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1011       UniqueRV = nullptr;
1012       return false;
1013     }
1014 
1015     // Do not overwrite a value with an undef.
1016     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1017       UniqueRV = &RV;
1018 
1019     return true;
1020   };
1021 
1022   if (!A.checkForAllReturnedValues(Pred, *this))
1023     UniqueRV = nullptr;
1024 
1025   return UniqueRV;
1026 }
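
// For example (a sketch): in
//   define i32 @g(i1 %c, i32 %x) {
//     br i1 %c, label %t, label %f
//   t:
//     ret i32 %x
//   f:
//     ret i32 undef
//   }
// the assumed unique return value is %x, since undef is compatible with any
// value.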
1027 
1028 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1029     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1030     const {
1031   if (!isValidState())
1032     return false;
1033 
1034   // Check all returned values but ignore call sites as long as we have not
1035   // encountered an overdefined one during an update.
1036   for (auto &It : ReturnedValues) {
1037     Value *RV = It.first;
1038 
1039     CallBase *CB = dyn_cast<CallBase>(RV);
1040     if (CB && !UnresolvedCalls.count(CB))
1041       continue;
1042 
1043     if (!Pred(*RV, It.second))
1044       return false;
1045   }
1046 
1047   return true;
1048 }
1049 
1050 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1051   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1052   bool Changed = false;
1053 
1054   // State used in the value traversals starting in returned values.
1055   struct RVState {
1056     // The map in which we collect return values -> return instrs.
1057     decltype(ReturnedValues) &RetValsMap;
1058     // The flag to indicate a change.
1059     bool &Changed;
1060     // The return instrs we come from.
1061     SmallSetVector<ReturnInst *, 4> RetInsts;
1062   };
1063 
1064   // Callback for a leaf value returned by the associated function.
1065   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1066                          bool) -> bool {
1067     auto Size = RVS.RetValsMap[&Val].size();
1068     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1069     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1070     RVS.Changed |= Inserted;
1071     LLVM_DEBUG({
1072       if (Inserted)
1073         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1074                << " => " << RVS.RetInsts.size() << "\n";
1075     });
1076     return true;
1077   };
1078 
1079   // Helper method to invoke the generic value traversal.
1080   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1081                                 const Instruction *CtxI) {
1082     IRPosition RetValPos = IRPosition::value(RV, getCallBaseContext());
1083     return genericValueTraversal<AAReturnedValues, RVState>(
1084         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1085         /* UseValueSimplify */ false);
1086   };
1087 
  // Callback for all "return instructions" live in the associated function.
1089   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1090     ReturnInst &Ret = cast<ReturnInst>(I);
1091     RVState RVS({ReturnedValues, Changed, {}});
1092     RVS.RetInsts.insert(&Ret);
1093     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1094   };
1095 
  // Start by discovering returned values from all live return instructions in
  // the associated function.
1098   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1099     return indicatePessimisticFixpoint();
1100 
  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it, we keep a record of potential new entries in a
  // copy map, NewRVsMap.
1105   decltype(ReturnedValues) NewRVsMap;
1106 
1107   auto HandleReturnValue = [&](Value *RV,
1108                                SmallSetVector<ReturnInst *, 4> &RIs) {
1109     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1110                       << RIs.size() << " RIs\n");
1111     CallBase *CB = dyn_cast<CallBase>(RV);
1112     if (!CB || UnresolvedCalls.count(CB))
1113       return;
1114 
1115     if (!CB->getCalledFunction()) {
1116       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1117                         << "\n");
1118       UnresolvedCalls.insert(CB);
1119       return;
1120     }
1121 
1122     // TODO: use the function scope once we have call site AAReturnedValues.
1123     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1124         *this, IRPosition::function(*CB->getCalledFunction()),
1125         DepClassTy::REQUIRED);
1126     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1127                       << RetValAA << "\n");
1128 
    // Skip dead ends; if we do not know anything about the returned call, we
    // mark it as unresolved and it will stay that way.
1131     if (!RetValAA.getState().isValidState()) {
1132       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1133                         << "\n");
1134       UnresolvedCalls.insert(CB);
1135       return;
1136     }
1137 
1138     // Do not try to learn partial information. If the callee has unresolved
1139     // return values we will treat the call as unresolved/opaque.
1140     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1141     if (!RetValAAUnresolvedCalls.empty()) {
1142       UnresolvedCalls.insert(CB);
1143       return;
1144     }
1145 
    // Now check if we can track transitively returned values. If possible,
    // that is, if all return values can be represented in the current scope,
    // do so.
1148     bool Unresolved = false;
1149     for (auto &RetValAAIt : RetValAA.returned_values()) {
1150       Value *RetVal = RetValAAIt.first;
1151       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1152           isa<Constant>(RetVal))
1153         continue;
1154       // Anything that did not fit in the above categories cannot be resolved,
1155       // mark the call as unresolved.
1156       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1157                            "cannot be translated: "
1158                         << *RetVal << "\n");
1159       UnresolvedCalls.insert(CB);
1160       Unresolved = true;
1161       break;
1162     }
1163 
1164     if (Unresolved)
1165       return;
1166 
1167     // Now track transitively returned values.
1168     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1169     if (NumRetAA == RetValAA.getNumReturnValues()) {
1170       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1171                            "changed since it was seen last\n");
1172       return;
1173     }
1174     NumRetAA = RetValAA.getNumReturnValues();
1175 
1176     for (auto &RetValAAIt : RetValAA.returned_values()) {
1177       Value *RetVal = RetValAAIt.first;
1178       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1179         // Arguments are mapped to call site operands and we begin the traversal
1180         // again.
1181         bool Unused = false;
1182         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1183         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1184         continue;
1185       }
1186       if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time; there is
        // nothing for us to do here.
1189         continue;
1190       }
1191       if (isa<Constant>(RetVal)) {
1192         // Constants are valid everywhere, we can simply take them.
1193         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1194         continue;
1195       }
1196     }
1197   };
1198 
1199   for (auto &It : ReturnedValues)
1200     HandleReturnValue(It.first, It.second);
1201 
1202   // Because processing the new information can again lead to new return values
1203   // we have to be careful and iterate until this iteration is complete. The
1204   // idea is that we are in a stable state at the end of an update. All return
1205   // values have been handled and properly categorized. We might not update
1206   // again if we have not requested a non-fix attribute so we cannot "wait" for
1207   // the next update to analyze a new return value.
1208   while (!NewRVsMap.empty()) {
1209     auto It = std::move(NewRVsMap.back());
1210     NewRVsMap.pop_back();
1211 
1212     assert(!It.second.empty() && "Entry does not add anything.");
1213     auto &ReturnInsts = ReturnedValues[It.first];
1214     for (ReturnInst *RI : It.second)
1215       if (ReturnInsts.insert(RI)) {
1216         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1217                           << *It.first << " => " << *RI << "\n");
1218         HandleReturnValue(It.first, ReturnInsts);
1219         Changed = true;
1220       }
1221   }
1222 
1223   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1224   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1225 }
1226 
1227 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1228   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1229       : AAReturnedValuesImpl(IRP, A) {}
1230 
1231   /// See AbstractAttribute::trackStatistics()
1232   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1233 };
1234 
/// Returned values information for a call site.
1236 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1237   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1238       : AAReturnedValuesImpl(IRP, A) {}
1239 
1240   /// See AbstractAttribute::initialize(...).
1241   void initialize(Attributor &A) override {
1242     // TODO: Once we have call site specific value information we can provide
1243     //       call site specific liveness information and then it makes
1244     //       sense to specialize attributes for call sites instead of
1245     //       redirecting requests to the callee.
1246     llvm_unreachable("Abstract attributes for returned values are not "
1247                      "supported for call sites yet!");
1248   }
1249 
1250   /// See AbstractAttribute::updateImpl(...).
1251   ChangeStatus updateImpl(Attributor &A) override {
1252     return indicatePessimisticFixpoint();
1253   }
1254 
1255   /// See AbstractAttribute::trackStatistics()
1256   void trackStatistics() const override {}
1257 };
1258 
1259 /// ------------------------ NoSync Function Attribute -------------------------
1260 
1261 struct AANoSyncImpl : AANoSync {
1262   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1263 
1264   const std::string getAsStr() const override {
1265     return getAssumed() ? "nosync" : "may-sync";
1266   }
1267 
1268   /// See AbstractAttribute::updateImpl(...).
1269   ChangeStatus updateImpl(Attributor &A) override;
1270 
  /// Helper function used to determine whether an instruction is a non-relaxed
  /// atomic, i.e., an atomic instruction that does not have unordered or
  /// monotonic ordering.
1274   static bool isNonRelaxedAtomic(Instruction *I);
1275 
  /// Helper function specific to intrinsics which are potentially volatile.
1277   static bool isNoSyncIntrinsic(Instruction *I);
1278 };
1279 
1280 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1281   if (!I->isAtomic())
1282     return false;
1283 
1284   if (auto *FI = dyn_cast<FenceInst>(I))
1285     // All legal orderings for fence are stronger than monotonic.
1286     return FI->getSyncScopeID() != SyncScope::SingleThread;
1287   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1288     // Unordered is not a legal ordering for cmpxchg.
1289     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1290             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1291   }
1292 
1293   AtomicOrdering Ordering;
1294   switch (I->getOpcode()) {
1295   case Instruction::AtomicRMW:
1296     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1297     break;
1298   case Instruction::Store:
1299     Ordering = cast<StoreInst>(I)->getOrdering();
1300     break;
1301   case Instruction::Load:
1302     Ordering = cast<LoadInst>(I)->getOrdering();
1303     break;
1304   default:
1305     llvm_unreachable(
1306         "New atomic operations need to be known in the attributor.");
1307   }
1308 
1309   return (Ordering != AtomicOrdering::Unordered &&
1310           Ordering != AtomicOrdering::Monotonic);
1311 }
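
// For example, "load atomic i32, i32* %p acquire, align 4" is a non-relaxed
// atomic, while "store atomic i32 0, i32* %p monotonic, align 4" and a
// syncscope("singlethread") fence are not.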
1312 
1313 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1314 /// which would be nosync except that they have a volatile flag.  All other
1315 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1316 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1317   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1318     return !MI->isVolatile();
1319   return false;
1320 }
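
// For example, a call to @llvm.memcpy with its volatile flag set to false is
// nosync, while the same call with the flag set to true is not.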
1321 
1322 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1323 
1324   auto CheckRWInstForNoSync = [&](Instruction &I) {
    // We are looking for volatile instructions or non-relaxed atomics.
1326 
1327     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1328       if (CB->hasFnAttr(Attribute::NoSync))
1329         return true;
1330 
1331       if (isNoSyncIntrinsic(&I))
1332         return true;
1333 
1334       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1335           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1336       return NoSyncAA.isAssumedNoSync();
1337     }
1338 
1339     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1340       return true;
1341 
1342     return false;
1343   };
1344 
1345   auto CheckForNoSync = [&](Instruction &I) {
1346     // At this point we handled all read/write effects and they are all
1347     // nosync, so they can be skipped.
1348     if (I.mayReadOrWriteMemory())
1349       return true;
1350 
1351     // non-convergent and readnone imply nosync.
1352     return !cast<CallBase>(I).isConvergent();
1353   };
1354 
1355   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1356       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1357     return indicatePessimisticFixpoint();
1358 
1359   return ChangeStatus::UNCHANGED;
1360 }
1361 
1362 struct AANoSyncFunction final : public AANoSyncImpl {
1363   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1364       : AANoSyncImpl(IRP, A) {}
1365 
1366   /// See AbstractAttribute::trackStatistics()
1367   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1368 };
1369 
/// NoSync attribute deduction for a call site.
1371 struct AANoSyncCallSite final : AANoSyncImpl {
1372   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1373       : AANoSyncImpl(IRP, A) {}
1374 
1375   /// See AbstractAttribute::initialize(...).
1376   void initialize(Attributor &A) override {
1377     AANoSyncImpl::initialize(A);
1378     Function *F = getAssociatedFunction();
1379     if (!F || F->isDeclaration())
1380       indicatePessimisticFixpoint();
1381   }
1382 
1383   /// See AbstractAttribute::updateImpl(...).
1384   ChangeStatus updateImpl(Attributor &A) override {
1385     // TODO: Once we have call site specific value information we can provide
1386     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1388     //       redirecting requests to the callee argument.
1389     Function *F = getAssociatedFunction();
1390     const IRPosition &FnPos = IRPosition::function(*F);
1391     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1392     return clampStateAndIndicateChange(getState(), FnAA.getState());
1393   }
1394 
1395   /// See AbstractAttribute::trackStatistics()
1396   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1397 };
1398 
1399 /// ------------------------ No-Free Attributes ----------------------------
1400 
1401 struct AANoFreeImpl : public AANoFree {
1402   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1403 
1404   /// See AbstractAttribute::updateImpl(...).
1405   ChangeStatus updateImpl(Attributor &A) override {
1406     auto CheckForNoFree = [&](Instruction &I) {
1407       const auto &CB = cast<CallBase>(I);
1408       if (CB.hasFnAttr(Attribute::NoFree))
1409         return true;
1410 
1411       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1412           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1413       return NoFreeAA.isAssumedNoFree();
1414     };
1415 
1416     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1417       return indicatePessimisticFixpoint();
1418     return ChangeStatus::UNCHANGED;
1419   }
1420 
1421   /// See AbstractAttribute::getAsStr().
1422   const std::string getAsStr() const override {
1423     return getAssumed() ? "nofree" : "may-free";
1424   }
1425 };
1426 
1427 struct AANoFreeFunction final : public AANoFreeImpl {
1428   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1429       : AANoFreeImpl(IRP, A) {}
1430 
1431   /// See AbstractAttribute::trackStatistics()
1432   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1433 };
1434 
/// NoFree attribute deduction for a call site.
1436 struct AANoFreeCallSite final : AANoFreeImpl {
1437   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1438       : AANoFreeImpl(IRP, A) {}
1439 
1440   /// See AbstractAttribute::initialize(...).
1441   void initialize(Attributor &A) override {
1442     AANoFreeImpl::initialize(A);
1443     Function *F = getAssociatedFunction();
1444     if (!F || F->isDeclaration())
1445       indicatePessimisticFixpoint();
1446   }
1447 
1448   /// See AbstractAttribute::updateImpl(...).
1449   ChangeStatus updateImpl(Attributor &A) override {
1450     // TODO: Once we have call site specific value information we can provide
1451     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1453     //       redirecting requests to the callee argument.
1454     Function *F = getAssociatedFunction();
1455     const IRPosition &FnPos = IRPosition::function(*F);
1456     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1457     return clampStateAndIndicateChange(getState(), FnAA.getState());
1458   }
1459 
1460   /// See AbstractAttribute::trackStatistics()
1461   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1462 };
1463 
1464 /// NoFree attribute for floating values.
1465 struct AANoFreeFloating : AANoFreeImpl {
1466   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1467       : AANoFreeImpl(IRP, A) {}
1468 
1469   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }
1471 
  /// See AbstractAttribute::updateImpl(...).
1473   ChangeStatus updateImpl(Attributor &A) override {
1474     const IRPosition &IRP = getIRPosition();
1475 
1476     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1477         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1478     if (NoFreeAA.isAssumedNoFree())
1479       return ChangeStatus::UNCHANGED;
1480 
1481     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1482     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1483       Instruction *UserI = cast<Instruction>(U.getUser());
1484       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1485         if (CB->isBundleOperand(&U))
1486           return false;
1487         if (!CB->isArgOperand(&U))
1488           return true;
1489         unsigned ArgNo = CB->getArgOperandNo(&U);
1490 
1491         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1492             *this, IRPosition::callsite_argument(*CB, ArgNo),
1493             DepClassTy::REQUIRED);
1494         return NoFreeArg.isAssumedNoFree();
1495       }
1496 
1497       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1498           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1499         Follow = true;
1500         return true;
1501       }
1502       if (isa<ReturnInst>(UserI))
1503         return true;
1504 
1505       // Unknown user.
1506       return false;
1507     };
1508     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1509       return indicatePessimisticFixpoint();
1510 
1511     return ChangeStatus::UNCHANGED;
1512   }
1513 };
1514 
1515 /// NoFree attribute for a call site argument.
1516 struct AANoFreeArgument final : AANoFreeFloating {
1517   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1518       : AANoFreeFloating(IRP, A) {}
1519 
1520   /// See AbstractAttribute::trackStatistics()
1521   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1522 };
1523 
1524 /// NoFree attribute for call site arguments.
1525 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1526   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1527       : AANoFreeFloating(IRP, A) {}
1528 
1529   /// See AbstractAttribute::updateImpl(...).
1530   ChangeStatus updateImpl(Attributor &A) override {
1531     // TODO: Once we have call site specific value information we can provide
1532     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1534     //       redirecting requests to the callee argument.
1535     Argument *Arg = getAssociatedArgument();
1536     if (!Arg)
1537       return indicatePessimisticFixpoint();
1538     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1539     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1540     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1541   }
1542 
1543   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
1545 };
1546 
1547 /// NoFree attribute for function return value.
1548 struct AANoFreeReturned final : AANoFreeFloating {
1549   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1550       : AANoFreeFloating(IRP, A) {
1551     llvm_unreachable("NoFree is not applicable to function returns!");
1552   }
1553 
1554   /// See AbstractAttribute::initialize(...).
1555   void initialize(Attributor &A) override {
1556     llvm_unreachable("NoFree is not applicable to function returns!");
1557   }
1558 
1559   /// See AbstractAttribute::updateImpl(...).
1560   ChangeStatus updateImpl(Attributor &A) override {
1561     llvm_unreachable("NoFree is not applicable to function returns!");
1562   }
1563 
1564   /// See AbstractAttribute::trackStatistics()
1565   void trackStatistics() const override {}
1566 };
1567 
1568 /// NoFree attribute deduction for a call site return value.
1569 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1570   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1571       : AANoFreeFloating(IRP, A) {}
1572 
1573   ChangeStatus manifest(Attributor &A) override {
1574     return ChangeStatus::UNCHANGED;
1575   }
1576   /// See AbstractAttribute::trackStatistics()
1577   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1578 };
1579 
1580 /// ------------------------ NonNull Argument Attribute ------------------------
1581 static int64_t getKnownNonNullAndDerefBytesForUse(
1582     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1583     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1584   TrackUse = false;
1585 
1586   const Value *UseV = U->get();
1587   if (!UseV->getType()->isPointerTy())
1588     return 0;
1589 
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. For now we simply look through casts and GEPs; we could be
  // smarter and avoid looking through operations we do not like, e.g.,
  // non-inbounds GEPs.
1593   if (isa<CastInst>(I)) {
1594     TrackUse = true;
1595     return 0;
1596   }
1597 
1598   if (isa<GetElementPtrInst>(I)) {
1599     TrackUse = true;
1600     return 0;
1601   }
1602 
1603   Type *PtrTy = UseV->getType();
1604   const Function *F = I->getFunction();
1605   bool NullPointerIsDefined =
1606       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1607   const DataLayout &DL = A.getInfoCache().getDL();
1608   if (const auto *CB = dyn_cast<CallBase>(I)) {
1609     if (CB->isBundleOperand(U)) {
1610       if (RetainedKnowledge RK = getKnowledgeFromUse(
1611               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1612         IsNonNull |=
1613             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1614         return RK.ArgValue;
1615       }
1616       return 0;
1617     }
1618 
1619     if (CB->isCallee(U)) {
1620       IsNonNull |= !NullPointerIsDefined;
1621       return 0;
1622     }
1623 
1624     unsigned ArgNo = CB->getArgOperandNo(U);
1625     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1626     // As long as we only use known information there is no need to track
1627     // dependences here.
1628     auto &DerefAA =
1629         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1630     IsNonNull |= DerefAA.isKnownNonNull();
1631     return DerefAA.getKnownDereferenceableBytes();
1632   }
1633 
1634   int64_t Offset;
1635   const Value *Base =
1636       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1637   if (Base) {
1638     if (Base == &AssociatedValue &&
1639         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1640       int64_t DerefBytes =
1641           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1642 
1643       IsNonNull |= !NullPointerIsDefined;
1644       return std::max(int64_t(0), DerefBytes);
1645     }
1646   }
1647 
  // Corner case when the offset is 0.
1649   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1650                                               /*AllowNonInbounds*/ true);
1651   if (Base) {
1652     if (Offset == 0 && Base == &AssociatedValue &&
1653         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1654       int64_t DerefBytes =
1655           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1656       IsNonNull |= !NullPointerIsDefined;
1657       return std::max(int64_t(0), DerefBytes);
1658     }
1659   }
1660 
1661   return 0;
1662 }
1663 
1664 struct AANonNullImpl : AANonNull {
1665   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1666       : AANonNull(IRP, A),
1667         NullIsDefined(NullPointerIsDefined(
1668             getAnchorScope(),
1669             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1670 
1671   /// See AbstractAttribute::initialize(...).
1672   void initialize(Attributor &A) override {
1673     Value &V = getAssociatedValue();
1674     if (!NullIsDefined &&
1675         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1676                 /* IgnoreSubsumingPositions */ false, &A)) {
1677       indicateOptimisticFixpoint();
1678       return;
1679     }
1680 
1681     if (isa<ConstantPointerNull>(V)) {
1682       indicatePessimisticFixpoint();
1683       return;
1684     }
1685 
1686     AANonNull::initialize(A);
1687 
1688     bool CanBeNull, CanBeFreed;
1689     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
1690                                          CanBeFreed)) {
1691       if (!CanBeNull) {
1692         indicateOptimisticFixpoint();
1693         return;
1694       }
1695     }
1696 
1697     if (isa<GlobalValue>(&getAssociatedValue())) {
1698       indicatePessimisticFixpoint();
1699       return;
1700     }
1701 
1702     if (Instruction *CtxI = getCtxI())
1703       followUsesInMBEC(*this, A, getState(), *CtxI);
1704   }
1705 
1706   /// See followUsesInMBEC
1707   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1708                        AANonNull::StateType &State) {
1709     bool IsNonNull = false;
1710     bool TrackUse = false;
1711     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1712                                        IsNonNull, TrackUse);
1713     State.setKnown(IsNonNull);
1714     return TrackUse;
1715   }
1716 
1717   /// See AbstractAttribute::getAsStr().
1718   const std::string getAsStr() const override {
1719     return getAssumed() ? "nonnull" : "may-null";
1720   }
1721 
1722   /// Flag to determine if the underlying value can be null and still allow
1723   /// valid accesses.
1724   const bool NullIsDefined;
1725 };
1726 
1727 /// NonNull attribute for a floating value.
1728 struct AANonNullFloating : public AANonNullImpl {
1729   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1730       : AANonNullImpl(IRP, A) {}
1731 
1732   /// See AbstractAttribute::updateImpl(...).
1733   ChangeStatus updateImpl(Attributor &A) override {
1734     const DataLayout &DL = A.getDataLayout();
1735 
1736     DominatorTree *DT = nullptr;
1737     AssumptionCache *AC = nullptr;
1738     InformationCache &InfoCache = A.getInfoCache();
1739     if (const Function *Fn = getAnchorScope()) {
1740       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1741       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1742     }
1743 
1744     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1745                             AANonNull::StateType &T, bool Stripped) -> bool {
1746       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1747                                              DepClassTy::REQUIRED);
1748       if (!Stripped && this == &AA) {
1749         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1750           T.indicatePessimisticFixpoint();
1751       } else {
1752         // Use abstract attribute information.
1753         const AANonNull::StateType &NS = AA.getState();
1754         T ^= NS;
1755       }
1756       return T.isValidState();
1757     };
1758 
1759     StateType T;
1760     if (!genericValueTraversal<AANonNull, StateType>(
1761             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1762       return indicatePessimisticFixpoint();
1763 
1764     return clampStateAndIndicateChange(getState(), T);
1765   }
1766 
1767   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
1769 };
1770 
1771 /// NonNull attribute for function return value.
1772 struct AANonNullReturned final
1773     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1774   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1775       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1776 
1777   /// See AbstractAttribute::getAsStr().
1778   const std::string getAsStr() const override {
1779     return getAssumed() ? "nonnull" : "may-null";
1780   }
1781 
1782   /// See AbstractAttribute::trackStatistics()
1783   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1784 };
1785 
1786 /// NonNull attribute for function argument.
1787 struct AANonNullArgument final
1788     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1789   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1790       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1791 
1792   /// See AbstractAttribute::trackStatistics()
1793   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1794 };
1795 
1796 struct AANonNullCallSiteArgument final : AANonNullFloating {
1797   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1798       : AANonNullFloating(IRP, A) {}
1799 
1800   /// See AbstractAttribute::trackStatistics()
1801   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1802 };
1803 
1804 /// NonNull attribute for a call site return position.
1805 struct AANonNullCallSiteReturned final
1806     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1807   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1808       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1809 
1810   /// See AbstractAttribute::trackStatistics()
1811   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1812 };
1813 
1814 /// ------------------------ No-Recurse Attributes ----------------------------
1815 
1816 struct AANoRecurseImpl : public AANoRecurse {
1817   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1818 
1819   /// See AbstractAttribute::getAsStr()
1820   const std::string getAsStr() const override {
1821     return getAssumed() ? "norecurse" : "may-recurse";
1822   }
1823 };
1824 
1825 struct AANoRecurseFunction final : AANoRecurseImpl {
1826   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1827       : AANoRecurseImpl(IRP, A) {}
1828 
1829   /// See AbstractAttribute::initialize(...).
1830   void initialize(Attributor &A) override {
1831     AANoRecurseImpl::initialize(A);
1832     if (const Function *F = getAnchorScope())
1833       if (A.getInfoCache().getSccSize(*F) != 1)
1834         indicatePessimisticFixpoint();
1835   }
1836 
1837   /// See AbstractAttribute::updateImpl(...).
1838   ChangeStatus updateImpl(Attributor &A) override {
1839 
1840     // If all live call sites are known to be no-recurse, we are as well.
1841     auto CallSitePred = [&](AbstractCallSite ACS) {
1842       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1843           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1844           DepClassTy::NONE);
1845       return NoRecurseAA.isKnownNoRecurse();
1846     };
1847     bool AllCallSitesKnown;
1848     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1849       // If we know all call sites and all are known no-recurse, we are done.
1850       // If all known call sites, which might not be all that exist, are known
1851       // to be no-recurse, we are not done but we can continue to assume
1852       // no-recurse. If one of the call sites we have not visited will become
1853       // live, another update is triggered.
1854       if (AllCallSitesKnown)
1855         indicateOptimisticFixpoint();
1856       return ChangeStatus::UNCHANGED;
1857     }
1858 
1859     // If the above check does not hold anymore we look at the calls.
1860     auto CheckForNoRecurse = [&](Instruction &I) {
1861       const auto &CB = cast<CallBase>(I);
1862       if (CB.hasFnAttr(Attribute::NoRecurse))
1863         return true;
1864 
1865       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1866           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1867       if (!NoRecurseAA.isAssumedNoRecurse())
1868         return false;
1869 
      // A direct call to this function itself is recursion, even if the
      // callee is otherwise assumed norecurse.
1871       if (CB.getCalledFunction() == getAnchorScope())
1872         return false;
1873 
1874       return true;
1875     };
1876 
1877     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1878       return indicatePessimisticFixpoint();
1879     return ChangeStatus::UNCHANGED;
1880   }
1881 
1882   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1883 };
1884 
/// NoRecurse attribute deduction for a call site.
1886 struct AANoRecurseCallSite final : AANoRecurseImpl {
1887   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1888       : AANoRecurseImpl(IRP, A) {}
1889 
1890   /// See AbstractAttribute::initialize(...).
1891   void initialize(Attributor &A) override {
1892     AANoRecurseImpl::initialize(A);
1893     Function *F = getAssociatedFunction();
1894     if (!F || F->isDeclaration())
1895       indicatePessimisticFixpoint();
1896   }
1897 
1898   /// See AbstractAttribute::updateImpl(...).
1899   ChangeStatus updateImpl(Attributor &A) override {
1900     // TODO: Once we have call site specific value information we can provide
1901     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1903     //       redirecting requests to the callee argument.
1904     Function *F = getAssociatedFunction();
1905     const IRPosition &FnPos = IRPosition::function(*F);
1906     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1907     return clampStateAndIndicateChange(getState(), FnAA.getState());
1908   }
1909 
1910   /// See AbstractAttribute::trackStatistics()
1911   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1912 };
1913 
1914 /// -------------------- Undefined-Behavior Attributes ------------------------
1915 
1916 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1917   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1918       : AAUndefinedBehavior(IRP, A) {}
1919 
  /// See AbstractAttribute::updateImpl(...).
1922   ChangeStatus updateImpl(Attributor &A) override {
1923     const size_t UBPrevSize = KnownUBInsts.size();
1924     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1925 
1926     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1927       // Skip instructions that are already saved.
1928       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1929         return true;
1930 
1931       // If we reach here, we know we have an instruction
1932       // that accesses memory through a pointer operand,
1933       // for which getPointerOperand() should give it to us.
1934       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1935       assert(PtrOp &&
1936              "Expected pointer operand of memory accessing instruction");
1937 
1938       // Either we stopped and the appropriate action was taken,
1939       // or we got back a simplified value to continue.
1940       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1941       if (!SimplifiedPtrOp.hasValue())
1942         return true;
1943       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1944 
1945       // A memory access through a pointer is considered UB
1946       // only if the pointer has constant null value.
1947       // TODO: Expand it to not only check constant values.
1948       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1949         AssumedNoUBInsts.insert(&I);
1950         return true;
1951       }
1952       const Type *PtrTy = PtrOpVal->getType();
1953 
1954       // Because we only consider instructions inside functions,
1955       // assume that a parent function exists.
1956       const Function *F = I.getFunction();
1957 
      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
1960       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1961         AssumedNoUBInsts.insert(&I);
1962       else
1963         KnownUBInsts.insert(&I);
1964       return true;
1965     };
1966 
1967     auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has an
      // `undef` condition.
1970 
1971       // Skip instructions that are already saved.
1972       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1973         return true;
1974 
1975       // We know we have a branch instruction.
1976       auto BrInst = cast<BranchInst>(&I);
1977 
1978       // Unconditional branches are never considered UB.
1979       if (BrInst->isUnconditional())
1980         return true;
1981 
1982       // Either we stopped and the appropriate action was taken,
1983       // or we got back a simplified value to continue.
1984       Optional<Value *> SimplifiedCond =
1985           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1986       if (!SimplifiedCond.hasValue())
1987         return true;
1988       AssumedNoUBInsts.insert(&I);
1989       return true;
1990     };
1991 
1992     auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a callsite always causes UB.
1994 
1995       // Skip instructions that are already saved.
1996       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1997         return true;
1998 
1999       // Check nonnull and noundef argument attribute violation for each
2000       // callsite.
2001       CallBase &CB = cast<CallBase>(I);
2002       Function *Callee = CB.getCalledFunction();
2003       if (!Callee)
2004         return true;
2005       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute, this
        // callsite is considered UB.
2011         if (idx >= Callee->arg_size())
2012           break;
2013         Value *ArgVal = CB.getArgOperand(idx);
2014         if (!ArgVal)
2015           continue;
        // Here, we handle three cases.
        //   (1) Not having a value means it is dead. (we can replace the value
        //       with undef)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to a null pointer where it is known to be nonnull.
        //       The argument is a poison value and violates the noundef
        //       attribute.
2022         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2023         auto &NoUndefAA =
2024             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2025         if (!NoUndefAA.isKnownNoUndef())
2026           continue;
2027         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2028             *this, IRPosition::value(*ArgVal), DepClassTy::NONE);
2029         if (!ValueSimplifyAA.isKnown())
2030           continue;
2031         Optional<Value *> SimplifiedVal =
2032             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2033         if (!SimplifiedVal.hasValue() ||
2034             isa<UndefValue>(*SimplifiedVal.getValue())) {
2035           KnownUBInsts.insert(&I);
2036           continue;
2037         }
2038         if (!ArgVal->getType()->isPointerTy() ||
2039             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2040           continue;
2041         auto &NonNullAA =
2042             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2043         if (NonNullAA.isKnownNonNull())
2044           KnownUBInsts.insert(&I);
2045       }
2046       return true;
2047     };
2048 
2049     auto InspectReturnInstForUB =
        [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
          // Check if a return instruction always causes UB.
2052           // Note: It is guaranteed that the returned position of the anchor
2053           //       scope has noundef attribute when this is called.
2054           //       We also ensure the return position is not "assumed dead"
2055           //       because the returned value was then potentially simplified to
2056           //       `undef` in AAReturnedValues without removing the `noundef`
2057           //       attribute yet.
2058 
          // When the returned position has the noundef attribute, UB occurs
          // in the following cases.
2061           //   (1) Returned value is known to be undef.
2062           //   (2) The value is known to be a null pointer and the returned
2063           //       position has nonnull attribute (because the returned value is
2064           //       poison).
2065           bool FoundUB = false;
2066           if (isa<UndefValue>(V)) {
2067             FoundUB = true;
2068           } else {
2069             if (isa<ConstantPointerNull>(V)) {
2070               auto &NonNullAA = A.getAAFor<AANonNull>(
2071                   *this, IRPosition::returned(*getAnchorScope()),
2072                   DepClassTy::NONE);
2073               if (NonNullAA.isKnownNonNull())
2074                 FoundUB = true;
2075             }
2076           }
2077 
2078           if (FoundUB)
2079             for (ReturnInst *RI : RetInsts)
2080               KnownUBInsts.insert(RI);
2081           return true;
2082         };
2083 
2084     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2085                               {Instruction::Load, Instruction::Store,
2086                                Instruction::AtomicCmpXchg,
2087                                Instruction::AtomicRMW},
2088                               /* CheckBBLivenessOnly */ true);
2089     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2090                               /* CheckBBLivenessOnly */ true);
2091     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2092 
    // If the returned position of the anchor scope has the noundef attribute,
    // check all returned instructions.
2095     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2096       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2097       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2098         auto &RetPosNoUndefAA =
2099             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2100         if (RetPosNoUndefAA.isKnownNoUndef())
2101           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2102                                                     *this);
2103       }
2104     }
2105 
2106     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2107         UBPrevSize != KnownUBInsts.size())
2108       return ChangeStatus::CHANGED;
2109     return ChangeStatus::UNCHANGED;
2110   }
2111 
2112   bool isKnownToCauseUB(Instruction *I) const override {
2113     return KnownUBInsts.count(I);
2114   }
2115 
2116   bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (this
    // includes the instructions in the KnownUBInsts set). The rest of the
    // boilerplate ensures that the instruction is one of the kinds we test
    // for UB.
2122 
2123     switch (I->getOpcode()) {
2124     case Instruction::Load:
2125     case Instruction::Store:
2126     case Instruction::AtomicCmpXchg:
2127     case Instruction::AtomicRMW:
2128       return !AssumedNoUBInsts.count(I);
2129     case Instruction::Br: {
2130       auto BrInst = cast<BranchInst>(I);
2131       if (BrInst->isUnconditional())
2132         return false;
2133       return !AssumedNoUBInsts.count(I);
    }
2135     default:
2136       return false;
2137     }
2138     return false;
2139   }
2140 
2141   ChangeStatus manifest(Attributor &A) override {
2142     if (KnownUBInsts.empty())
2143       return ChangeStatus::UNCHANGED;
2144     for (Instruction *I : KnownUBInsts)
2145       A.changeToUnreachableAfterManifest(I);
2146     return ChangeStatus::CHANGED;
2147   }
2148 
2149   /// See AbstractAttribute::getAsStr()
2150   const std::string getAsStr() const override {
2151     return getAssumed() ? "undefined-behavior" : "no-ub";
2152   }
2153 
2154   /// Note: The correctness of this analysis depends on the fact that the
2155   /// following 2 sets will stop changing after some point.
2156   /// "Change" here means that their size changes.
2157   /// The size of each set is monotonically increasing
2158   /// (we only add items to them) and it is upper bounded by the number of
2159   /// instructions in the processed function (we can never save more
2160   /// elements in either set than this number). Hence, at some point,
2161   /// they will stop increasing.
2162   /// Consequently, at some point, both sets will have stopped
2163   /// changing, effectively making the analysis reach a fixpoint.
2164 
2165   /// Note: These 2 sets are disjoint and an instruction can be considered
2166   /// one of 3 things:
2167   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2168   ///    the KnownUBInsts set.
2169   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2170   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction: AAUndefinedBehavior
2172   ///    could not find a reason to assume or prove that it can cause UB,
2173   ///    hence it assumes it doesn't. We have a set for these instructions
2174   ///    so that we don't reprocess them in every update.
2175   ///    Note however that instructions in this set may cause UB.
2176 
2177 protected:
2178   /// A set of all live instructions _known_ to cause UB.
2179   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2180 
2181 private:
2182   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2183   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2184 
  // Should be called during updates when we are processing an instruction
  // \p I that depends on a value \p V; one of the following has to happen:
2187   // - If the value is assumed, then stop.
2188   // - If the value is known but undef, then consider it UB.
2189   // - Otherwise, do specific processing with the simplified value.
2190   // We return None in the first 2 cases to signify that an appropriate
2191   // action was taken and the caller should stop.
2192   // Otherwise, we return the simplified value that the caller should
2193   // use for specific processing.
2194   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2195                                          Instruction *I) {
2196     const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2197         *this, IRPosition::value(*V), DepClassTy::REQUIRED);
2198     Optional<Value *> SimplifiedV =
2199         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2200     if (!ValueSimplifyAA.isKnown()) {
2201       // Don't depend on assumed values.
2202       return llvm::None;
2203     }
2204     if (!SimplifiedV.hasValue()) {
2205       // If it is known (which we tested above) but it doesn't have a value,
2206       // then we can assume `undef` and hence the instruction is UB.
2207       KnownUBInsts.insert(I);
2208       return llvm::None;
2209     }
2210     Value *Val = SimplifiedV.getValue();
2211     if (isa<UndefValue>(Val)) {
2212       KnownUBInsts.insert(I);
2213       return llvm::None;
2214     }
2215     return Val;
2216   }
2217 };
2218 
2219 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2220   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2221       : AAUndefinedBehaviorImpl(IRP, A) {}
2222 
2223   /// See AbstractAttribute::trackStatistics()
2224   void trackStatistics() const override {
2225     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2226                "Number of instructions known to have UB");
2227     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2228         KnownUBInsts.size();
2229   }
2230 };
2231 
2232 /// ------------------------ Will-Return Attributes ----------------------------
2233 
// Helper function that checks whether a function contains any cycle that is
// not known to be bounded.
// Loops with a known maximum trip count are considered bounded; any other
// cycle is not.
2237 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2238   ScalarEvolution *SE =
2239       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2240   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function, we assume
  // any cycle to be unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm, to find all the
  // maximal SCCs. To detect whether there's a cycle, we only need to find the
  // maximal ones.
2245   if (!SE || !LI) {
2246     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2247       if (SCCI.hasCycle())
2248         return true;
2249     return false;
2250   }
2251 
2252   // If there's irreducible control, the function may contain non-loop cycles.
2253   if (mayContainIrreducibleControl(F, LI))
2254     return true;
2255 
  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
2257   for (auto *L : LI->getLoopsInPreorder()) {
2258     if (!SE->getSmallConstantMaxTripCount(L))
2259       return true;
2260   }
2261   return false;
2262 }
2263 
2264 struct AAWillReturnImpl : public AAWillReturn {
2265   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2266       : AAWillReturn(IRP, A) {}
2267 
2268   /// See AbstractAttribute::initialize(...).
2269   void initialize(Attributor &A) override {
2270     AAWillReturn::initialize(A);
2271 
2272     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2273       indicateOptimisticFixpoint();
2274       return;
2275     }
2276   }
2277 
2278   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2279   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2280     // Check for `mustprogress` in the scope and the associated function which
2281     // might be different if this is a call site.
2282     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2283         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2284       return false;
2285 
2286     const auto &MemAA =
2287         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2288     if (!MemAA.isAssumedReadOnly())
2289       return false;
2290     if (KnownOnly && !MemAA.isKnownReadOnly())
2291       return false;
2292     if (!MemAA.isKnownReadOnly())
2293       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2294 
2295     return true;
2296   }
2297 
2298   /// See AbstractAttribute::updateImpl(...).
2299   ChangeStatus updateImpl(Attributor &A) override {
2300     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2301       return ChangeStatus::UNCHANGED;
2302 
2303     auto CheckForWillReturn = [&](Instruction &I) {
2304       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2305       const auto &WillReturnAA =
2306           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2307       if (WillReturnAA.isKnownWillReturn())
2308         return true;
2309       if (!WillReturnAA.isAssumedWillReturn())
2310         return false;
2311       const auto &NoRecurseAA =
2312           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2313       return NoRecurseAA.isAssumedNoRecurse();
2314     };
2315 
2316     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2317       return indicatePessimisticFixpoint();
2318 
2319     return ChangeStatus::UNCHANGED;
2320   }
2321 
2322   /// See AbstractAttribute::getAsStr()
2323   const std::string getAsStr() const override {
2324     return getAssumed() ? "willreturn" : "may-noreturn";
2325   }
2326 };
2327 
2328 struct AAWillReturnFunction final : AAWillReturnImpl {
2329   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2330       : AAWillReturnImpl(IRP, A) {}
2331 
2332   /// See AbstractAttribute::initialize(...).
2333   void initialize(Attributor &A) override {
2334     AAWillReturnImpl::initialize(A);
2335 
2336     Function *F = getAnchorScope();
2337     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2338       indicatePessimisticFixpoint();
2339   }
2340 
2341   /// See AbstractAttribute::trackStatistics()
2342   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2343 };
2344 
/// WillReturn attribute deduction for a call site.
2346 struct AAWillReturnCallSite final : AAWillReturnImpl {
2347   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2348       : AAWillReturnImpl(IRP, A) {}
2349 
2350   /// See AbstractAttribute::initialize(...).
2351   void initialize(Attributor &A) override {
2352     AAWillReturnImpl::initialize(A);
2353     Function *F = getAssociatedFunction();
2354     if (!F || !A.isFunctionIPOAmendable(*F))
2355       indicatePessimisticFixpoint();
2356   }
2357 
2358   /// See AbstractAttribute::updateImpl(...).
2359   ChangeStatus updateImpl(Attributor &A) override {
2360     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2361       return ChangeStatus::UNCHANGED;
2362 
2363     // TODO: Once we have call site specific value information we can provide
2364     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2366     //       redirecting requests to the callee argument.
2367     Function *F = getAssociatedFunction();
2368     const IRPosition &FnPos = IRPosition::function(*F);
2369     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2370     return clampStateAndIndicateChange(getState(), FnAA.getState());
2371   }
2372 
2373   /// See AbstractAttribute::trackStatistics()
2374   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2375 };
2376 
2377 /// -------------------AAReachability Attribute--------------------------
2378 
2379 struct AAReachabilityImpl : AAReachability {
2380   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2381       : AAReachability(IRP, A) {}
2382 
2383   const std::string getAsStr() const override {
2384     // TODO: Return the number of reachable queries.
2385     return "reachable";
2386   }
2387 
2388   /// See AbstractAttribute::initialize(...).
2389   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2390 
2391   /// See AbstractAttribute::updateImpl(...).
2392   ChangeStatus updateImpl(Attributor &A) override {
2393     return indicatePessimisticFixpoint();
2394   }
2395 };
2396 
2397 struct AAReachabilityFunction final : public AAReachabilityImpl {
2398   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2399       : AAReachabilityImpl(IRP, A) {}
2400 
2401   /// See AbstractAttribute::trackStatistics()
2402   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2403 };
2404 
2405 /// ------------------------ NoAlias Argument Attribute ------------------------
2406 
2407 struct AANoAliasImpl : AANoAlias {
2408   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2409     assert(getAssociatedType()->isPointerTy() &&
2410            "Noalias is a pointer attribute");
2411   }
2412 
2413   const std::string getAsStr() const override {
2414     return getAssumed() ? "noalias" : "may-alias";
2415   }
2416 };
2417 
2418 /// NoAlias attribute for a floating value.
2419 struct AANoAliasFloating final : AANoAliasImpl {
2420   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2421       : AANoAliasImpl(IRP, A) {}
2422 
2423   /// See AbstractAttribute::initialize(...).
2424   void initialize(Attributor &A) override {
2425     AANoAliasImpl::initialize(A);
2426     Value *Val = &getAssociatedValue();
2427     do {
2428       CastInst *CI = dyn_cast<CastInst>(Val);
2429       if (!CI)
2430         break;
2431       Value *Base = CI->getOperand(0);
2432       if (!Base->hasOneUse())
2433         break;
2434       Val = Base;
2435     } while (true);
2436 
2437     if (!Val->getType()->isPointerTy()) {
2438       indicatePessimisticFixpoint();
2439       return;
2440     }
2441 
2442     if (isa<AllocaInst>(Val))
2443       indicateOptimisticFixpoint();
2444     else if (isa<ConstantPointerNull>(Val) &&
2445              !NullPointerIsDefined(getAnchorScope(),
2446                                    Val->getType()->getPointerAddressSpace()))
2447       indicateOptimisticFixpoint();
2448     else if (Val != &getAssociatedValue()) {
2449       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2450           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2451       if (ValNoAliasAA.isKnownNoAlias())
2452         indicateOptimisticFixpoint();
2453     }
2454   }
2455 
2456   /// See AbstractAttribute::updateImpl(...).
2457   ChangeStatus updateImpl(Attributor &A) override {
2458     // TODO: Implement this.
2459     return indicatePessimisticFixpoint();
2460   }
2461 
2462   /// See AbstractAttribute::trackStatistics()
2463   void trackStatistics() const override {
2464     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2465   }
2466 };
2467 
2468 /// NoAlias attribute for an argument.
2469 struct AANoAliasArgument final
2470     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2471   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2472   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2473 
2474   /// See AbstractAttribute::initialize(...).
2475   void initialize(Attributor &A) override {
2476     Base::initialize(A);
2477     // See callsite argument attribute and callee argument attribute.
2478     if (hasAttr({Attribute::ByVal}))
2479       indicateOptimisticFixpoint();
2480   }
2481 
2482   /// See AbstractAttribute::update(...).
2483   ChangeStatus updateImpl(Attributor &A) override {
2484     // We have to make sure no-alias on the argument does not break
2485     // synchronization when this is a callback argument, see also [1] below.
2486     // If synchronization cannot be affected, we delegate to the base updateImpl
2487     // function, otherwise we give up for now.
2488 
2489     // If the function is no-sync, no-alias cannot break synchronization.
2490     const auto &NoSyncAA =
2491         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2492                              DepClassTy::OPTIONAL);
2493     if (NoSyncAA.isAssumedNoSync())
2494       return Base::updateImpl(A);
2495 
2496     // If the argument is read-only, no-alias cannot break synchronization.
2497     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2498         *this, getIRPosition(), DepClassTy::OPTIONAL);
2499     if (MemBehaviorAA.isAssumedReadOnly())
2500       return Base::updateImpl(A);
2501 
2502     // If the argument is never passed through callbacks, no-alias cannot break
2503     // synchronization.
2504     bool AllCallSitesKnown;
2505     if (A.checkForAllCallSites(
2506             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2507             true, AllCallSitesKnown))
2508       return Base::updateImpl(A);
2509 
2510     // TODO: add no-alias but make sure it doesn't break synchronization by
2511     // introducing fake uses. See:
2512     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2513     //     International Workshop on OpenMP 2018,
2514     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2515 
2516     return indicatePessimisticFixpoint();
2517   }
2518 
2519   /// See AbstractAttribute::trackStatistics()
2520   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2521 };
2522 
2523 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2524   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2525       : AANoAliasImpl(IRP, A) {}
2526 
2527   /// See AbstractAttribute::initialize(...).
2528   void initialize(Attributor &A) override {
2529     // See callsite argument attribute and callee argument attribute.
2530     const auto &CB = cast<CallBase>(getAnchorValue());
2531     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2532       indicateOptimisticFixpoint();
2533     Value &Val = getAssociatedValue();
2534     if (isa<ConstantPointerNull>(Val) &&
2535         !NullPointerIsDefined(getAnchorScope(),
2536                               Val.getType()->getPointerAddressSpace()))
2537       indicateOptimisticFixpoint();
2538   }
2539 
2540   /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
2542   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2543                             const AAMemoryBehavior &MemBehaviorAA,
2544                             const CallBase &CB, unsigned OtherArgNo) {
2545     // We do not need to worry about aliasing with the underlying IRP.
2546     if (this->getCalleeArgNo() == (int)OtherArgNo)
2547       return false;
2548 
2549     // If it is not a pointer or pointer vector we do not alias.
2550     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2551     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2552       return false;
2553 
2554     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2555         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2556 
2557     // If the argument is readnone, there is no read-write aliasing.
2558     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2559       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2560       return false;
2561     }
2562 
2563     // If the argument is readonly and the underlying value is readonly, there
2564     // is no read-write aliasing.
2565     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2566     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2567       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2568       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2569       return false;
2570     }
2571 
2572     // We have to utilize actual alias analysis queries so we need the object.
2573     if (!AAR)
2574       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2575 
2576     // Try to rule it out at the call site.
2577     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2578     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2579                          "callsite arguments: "
2580                       << getAssociatedValue() << " " << *ArgOp << " => "
2581                       << (IsAliasing ? "" : "no-") << "alias \n");
2582 
2583     return IsAliasing;
2584   }
2585 
2586   bool
2587   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2588                                          const AAMemoryBehavior &MemBehaviorAA,
2589                                          const AANoAlias &NoAliasAA) {
2590     // We can deduce "noalias" if the following conditions hold.
2591     // (i)   Associated value is assumed to be noalias in the definition.
2592     // (ii)  Associated value is assumed to be no-capture in all the uses
2593     //       possibly executed before this callsite.
2594     // (iii) There is no other pointer argument which could alias with the
2595     //       value.
2596 
2597     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2598     if (!AssociatedValueIsNoAliasAtDef) {
2599       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2600                         << " is not no-alias at the definition\n");
2601       return false;
2602     }
2603 
2604     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2605 
2606     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2607     const Function *ScopeFn = VIRP.getAnchorScope();
2608     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
    // Check whether the value is captured in the scope using AANoCapture.
    // Look at the CFG and check only uses possibly executed before this
    // callsite.
2612     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2613       Instruction *UserI = cast<Instruction>(U.getUser());
2614 
      // If UserI is the current instruction and there is a single potential
      // use of the value in UserI, we allow the use.
2617       // TODO: We should inspect the operands and allow those that cannot alias
2618       //       with the value.
2619       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2620         return true;
2621 
2622       if (ScopeFn) {
2623         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2624             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2625 
2626         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2627           return true;
2628 
2629         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2630           if (CB->isArgOperand(&U)) {
2631 
2632             unsigned ArgNo = CB->getArgOperandNo(&U);
2633 
2634             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2635                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2636                 DepClassTy::OPTIONAL);
2637 
2638             if (NoCaptureAA.isAssumedNoCapture())
2639               return true;
2640           }
2641         }
2642       }
2643 
2644       // For cases which can potentially have more users
2645       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2646           isa<SelectInst>(U)) {
2647         Follow = true;
2648         return true;
2649       }
2650 
2651       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2652       return false;
2653     };
2654 
2655     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2656       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2657         LLVM_DEBUG(
2658             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2659                    << " cannot be noalias as it is potentially captured\n");
2660         return false;
2661       }
2662     }
2663     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2664 
2665     // Check there is no other pointer argument which could alias with the
2666     // value passed at this call site.
2667     // TODO: AbstractCallSite
2668     const auto &CB = cast<CallBase>(getAnchorValue());
2669     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2670          OtherArgNo++)
2671       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2672         return false;
2673 
2674     return true;
2675   }
2676 
2677   /// See AbstractAttribute::updateImpl(...).
2678   ChangeStatus updateImpl(Attributor &A) override {
2679     // If the argument is readnone we are done as there are no accesses via the
2680     // argument.
2681     auto &MemBehaviorAA =
2682         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2683     if (MemBehaviorAA.isAssumedReadNone()) {
2684       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2685       return ChangeStatus::UNCHANGED;
2686     }
2687 
2688     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2689     const auto &NoAliasAA =
2690         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2691 
2692     AAResults *AAR = nullptr;
2693     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2694                                                NoAliasAA)) {
2695       LLVM_DEBUG(
2696           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2697       return ChangeStatus::UNCHANGED;
2698     }
2699 
2700     return indicatePessimisticFixpoint();
2701   }
2702 
2703   /// See AbstractAttribute::trackStatistics()
2704   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2705 };
2706 
2707 /// NoAlias attribute for function return value.
2708 struct AANoAliasReturned final : AANoAliasImpl {
2709   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2710       : AANoAliasImpl(IRP, A) {}
2711 
2712   /// See AbstractAttribute::initialize(...).
2713   void initialize(Attributor &A) override {
2714     AANoAliasImpl::initialize(A);
2715     Function *F = getAssociatedFunction();
2716     if (!F || F->isDeclaration())
2717       indicatePessimisticFixpoint();
2718   }
2719 
2720   /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
2722 
2723     auto CheckReturnValue = [&](Value &RV) -> bool {
2724       if (Constant *C = dyn_cast<Constant>(&RV))
2725         if (C->isNullValue() || isa<UndefValue>(C))
2726           return true;
2727 
      // For now, we can only deduce noalias if we have call sites.
      // FIXME: add more support.
2730       if (!isa<CallBase>(&RV))
2731         return false;
2732 
2733       const IRPosition &RVPos = IRPosition::value(RV);
2734       const auto &NoAliasAA =
2735           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2736       if (!NoAliasAA.isAssumedNoAlias())
2737         return false;
2738 
2739       const auto &NoCaptureAA =
2740           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2741       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2742     };
2743 
2744     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2745       return indicatePessimisticFixpoint();
2746 
2747     return ChangeStatus::UNCHANGED;
2748   }
2749 
2750   /// See AbstractAttribute::trackStatistics()
2751   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2752 };
2753 
2754 /// NoAlias attribute deduction for a call site return value.
2755 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2756   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2757       : AANoAliasImpl(IRP, A) {}
2758 
2759   /// See AbstractAttribute::initialize(...).
2760   void initialize(Attributor &A) override {
2761     AANoAliasImpl::initialize(A);
2762     Function *F = getAssociatedFunction();
2763     if (!F || F->isDeclaration())
2764       indicatePessimisticFixpoint();
2765   }
2766 
2767   /// See AbstractAttribute::updateImpl(...).
2768   ChangeStatus updateImpl(Attributor &A) override {
2769     // TODO: Once we have call site specific value information we can provide
2770     //       call site specific liveness information and then it makes
2771     //       sense to specialize attributes for call sites arguments instead of
2772     //       redirecting requests to the callee argument.
2773     Function *F = getAssociatedFunction();
2774     const IRPosition &FnPos = IRPosition::returned(*F);
2775     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2776     return clampStateAndIndicateChange(getState(), FnAA.getState());
2777   }
2778 
2779   /// See AbstractAttribute::trackStatistics()
2780   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2781 };
2782 
2783 /// -------------------AAIsDead Function Attribute-----------------------
2784 
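/// Common base for deadness deduction of IR values, e.g., floating values,
/// arguments, and call site return values.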
2785 struct AAIsDeadValueImpl : public AAIsDead {
2786   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2787 
2788   /// See AAIsDead::isAssumedDead().
2789   bool isAssumedDead() const override { return getAssumed(); }
2790 
2791   /// See AAIsDead::isKnownDead().
2792   bool isKnownDead() const override { return getKnown(); }
2793 
2794   /// See AAIsDead::isAssumedDead(BasicBlock *).
2795   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2796 
2797   /// See AAIsDead::isKnownDead(BasicBlock *).
2798   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2799 
2800   /// See AAIsDead::isAssumedDead(Instruction *I).
2801   bool isAssumedDead(const Instruction *I) const override {
2802     return I == getCtxI() && isAssumedDead();
2803   }
2804 
2805   /// See AAIsDead::isKnownDead(Instruction *I).
2806   bool isKnownDead(const Instruction *I) const override {
2807     return isAssumedDead(I) && getKnown();
2808   }
2809 
2810   /// See AbstractAttribute::getAsStr().
2811   const std::string getAsStr() const override {
2812     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2813   }
2814 
2815   /// Check if all uses are assumed dead.
2816   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
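    // The predicate rejects every use it is asked about, so the traversal
    // below can only succeed if it visits no (assumed live) use at all.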
2817     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2818     // Explicitly set the dependence class to required because we want a long
2819     // chain of N dependent instructions to be considered live as soon as one is
2820     // without going through N update cycles. This is not required for
2821     // correctness.
2822     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2823   }
2824 
2825   /// Determine if \p I is assumed to be side-effect free.
2826   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2827     if (!I || wouldInstructionBeTriviallyDead(I))
2828       return true;
2829 
2830     auto *CB = dyn_cast<CallBase>(I);
2831     if (!CB || isa<IntrinsicInst>(CB))
2832       return false;
2833 
2834     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2835     const auto &NoUnwindAA =
2836         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2837     if (!NoUnwindAA.isAssumedNoUnwind())
2838       return false;
2839     if (!NoUnwindAA.isKnownNoUnwind())
2840       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2841 
2842     const auto &MemBehaviorAA =
2843         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2844     if (MemBehaviorAA.isAssumedReadOnly()) {
2845       if (!MemBehaviorAA.isKnownReadOnly())
2846         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2847       return true;
2848     }
2849     return false;
2850   }
2851 };
2852 
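/// Deadness deduction for a floating value, i.e., an instruction result that
/// is assumed to have no live uses.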
2853 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2854   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2855       : AAIsDeadValueImpl(IRP, A) {}
2856 
2857   /// See AbstractAttribute::initialize(...).
2858   void initialize(Attributor &A) override {
2859     if (isa<UndefValue>(getAssociatedValue())) {
2860       indicatePessimisticFixpoint();
2861       return;
2862     }
2863 
2864     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2865     if (!isAssumedSideEffectFree(A, I))
2866       indicatePessimisticFixpoint();
2867   }
2868 
2869   /// See AbstractAttribute::updateImpl(...).
2870   ChangeStatus updateImpl(Attributor &A) override {
2871     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2872     if (!isAssumedSideEffectFree(A, I))
2873       return indicatePessimisticFixpoint();
2874 
2875     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2876       return indicatePessimisticFixpoint();
2877     return ChangeStatus::UNCHANGED;
2878   }
2879 
2880   /// See AbstractAttribute::manifest(...).
2881   ChangeStatus manifest(Attributor &A) override {
2882     Value &V = getAssociatedValue();
2883     if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we basically know the users are all dead. We check if
      // isAssumedSideEffectFree returns true again because it might no longer
      // hold: possibly only the users are dead while the instruction (e.g., a
      // call) is still needed.
2888       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2889         A.deleteAfterManifest(*I);
2890         return ChangeStatus::CHANGED;
2891       }
2892     }
2893     if (V.use_empty())
2894       return ChangeStatus::UNCHANGED;
2895 
2896     bool UsedAssumedInformation = false;
2897     Optional<Constant *> C =
2898         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2899     if (C.hasValue() && C.getValue())
2900       return ChangeStatus::UNCHANGED;
2901 
2902     // Replace the value with undef as it is dead but keep droppable uses around
2903     // as they provide information we don't want to give up on just yet.
2904     UndefValue &UV = *UndefValue::get(V.getType());
2905     bool AnyChange =
        A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
2907     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2908   }
2909 
2910   /// See AbstractAttribute::trackStatistics()
2911   void trackStatistics() const override {
2912     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2913   }
2914 };
2915 
2916 struct AAIsDeadArgument : public AAIsDeadFloating {
2917   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2918       : AAIsDeadFloating(IRP, A) {}
2919 
2920   /// See AbstractAttribute::initialize(...).
2921   void initialize(Attributor &A) override {
2922     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2923       indicatePessimisticFixpoint();
2924   }
2925 
2926   /// See AbstractAttribute::manifest(...).
2927   ChangeStatus manifest(Attributor &A) override {
2928     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2929     Argument &Arg = *getAssociatedArgument();
2930     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2931       if (A.registerFunctionSignatureRewrite(
2932               Arg, /* ReplacementTypes */ {},
2933               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2934               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2935         Arg.dropDroppableUses();
2936         return ChangeStatus::CHANGED;
2937       }
2938     return Changed;
2939   }
2940 
2941   /// See AbstractAttribute::trackStatistics()
2942   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2943 };
2944 
2945 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2946   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2947       : AAIsDeadValueImpl(IRP, A) {}
2948 
2949   /// See AbstractAttribute::initialize(...).
2950   void initialize(Attributor &A) override {
2951     if (isa<UndefValue>(getAssociatedValue()))
2952       indicatePessimisticFixpoint();
2953   }
2954 
2955   /// See AbstractAttribute::updateImpl(...).
2956   ChangeStatus updateImpl(Attributor &A) override {
2957     // TODO: Once we have call site specific value information we can provide
2958     //       call site specific liveness information and then it makes
2959     //       sense to specialize attributes for call sites arguments instead of
2960     //       redirecting requests to the callee argument.
2961     Argument *Arg = getAssociatedArgument();
2962     if (!Arg)
2963       return indicatePessimisticFixpoint();
2964     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2965     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2966     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2967   }
2968 
2969   /// See AbstractAttribute::manifest(...).
2970   ChangeStatus manifest(Attributor &A) override {
2971     CallBase &CB = cast<CallBase>(getAnchorValue());
2972     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2973     assert(!isa<UndefValue>(U.get()) &&
2974            "Expected undef values to be filtered out!");
2975     UndefValue &UV = *UndefValue::get(U->getType());
2976     if (A.changeUseAfterManifest(U, UV))
2977       return ChangeStatus::CHANGED;
2978     return ChangeStatus::UNCHANGED;
2979   }
2980 
2981   /// See AbstractAttribute::trackStatistics()
2982   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2983 };
2984 
2985 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2986   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2987       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2988 
2989   /// See AAIsDead::isAssumedDead().
2990   bool isAssumedDead() const override {
2991     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2992   }
2993 
2994   /// See AbstractAttribute::initialize(...).
2995   void initialize(Attributor &A) override {
2996     if (isa<UndefValue>(getAssociatedValue())) {
2997       indicatePessimisticFixpoint();
2998       return;
2999     }
3000 
3001     // We track this separately as a secondary state.
3002     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3003   }
3004 
3005   /// See AbstractAttribute::updateImpl(...).
3006   ChangeStatus updateImpl(Attributor &A) override {
3007     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3008     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3009       IsAssumedSideEffectFree = false;
3010       Changed = ChangeStatus::CHANGED;
3011     }
3012 
3013     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3014       return indicatePessimisticFixpoint();
3015     return Changed;
3016   }
3017 
3018   /// See AbstractAttribute::trackStatistics()
3019   void trackStatistics() const override {
3020     if (IsAssumedSideEffectFree)
3021       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3022     else
3023       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3024   }
3025 
3026   /// See AbstractAttribute::getAsStr().
3027   const std::string getAsStr() const override {
3028     return isAssumedDead()
3029                ? "assumed-dead"
3030                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3031   }
3032 
3033 private:
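  /// Flag that tracks whether the call is assumed side-effect free; kept as a
  /// secondary state next to the use-based deadness.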
3034   bool IsAssumedSideEffectFree;
3035 };
3036 
3037 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3038   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3039       : AAIsDeadValueImpl(IRP, A) {}
3040 
3041   /// See AbstractAttribute::updateImpl(...).
3042   ChangeStatus updateImpl(Attributor &A) override {
3043 
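    // The trivially-true predicate means this walk only serves to register
    // liveness dependences on the return instructions, so this attribute is
    // updated when the liveness of a return instruction changes.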
3044     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3045                               {Instruction::Ret});
3046 
3047     auto PredForCallSite = [&](AbstractCallSite ACS) {
3048       if (ACS.isCallbackCall() || !ACS.getInstruction())
3049         return false;
3050       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3051     };
3052 
3053     bool AllCallSitesKnown;
3054     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3055                                 AllCallSitesKnown))
3056       return indicatePessimisticFixpoint();
3057 
3058     return ChangeStatus::UNCHANGED;
3059   }
3060 
3061   /// See AbstractAttribute::manifest(...).
3062   ChangeStatus manifest(Attributor &A) override {
3063     // TODO: Rewrite the signature to return void?
3064     bool AnyChange = false;
3065     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3066     auto RetInstPred = [&](Instruction &I) {
3067       ReturnInst &RI = cast<ReturnInst>(I);
3068       if (!isa<UndefValue>(RI.getReturnValue()))
3069         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3070       return true;
3071     };
3072     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3073     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3074   }
3075 
3076   /// See AbstractAttribute::trackStatistics()
3077   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3078 };
3079 
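/// Liveness deduction for a whole function, tracking assumed live basic
/// blocks, assumed live edges, and the instructions exploration continues
/// from.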
3080 struct AAIsDeadFunction : public AAIsDead {
3081   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3082 
3083   /// See AbstractAttribute::initialize(...).
3084   void initialize(Attributor &A) override {
3085     const Function *F = getAnchorScope();
3086     if (F && !F->isDeclaration()) {
3087       // We only want to compute liveness once. If the function is not part of
3088       // the SCC, skip it.
3089       if (A.isRunOn(*const_cast<Function *>(F))) {
3090         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3091         assumeLive(A, F->getEntryBlock());
3092       } else {
3093         indicatePessimisticFixpoint();
3094       }
3095     }
3096   }
3097 
3098   /// See AbstractAttribute::getAsStr().
3099   const std::string getAsStr() const override {
3100     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3101            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3102            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3103            std::to_string(KnownDeadEnds.size()) + "]";
3104   }
3105 
3106   /// See AbstractAttribute::manifest(...).
3107   ChangeStatus manifest(Attributor &A) override {
3108     assert(getState().isValidState() &&
3109            "Attempted to manifest an invalid state!");
3110 
3111     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3112     Function &F = *getAnchorScope();
3113 
3114     if (AssumedLiveBlocks.empty()) {
3115       A.deleteAfterManifest(F);
3116       return ChangeStatus::CHANGED;
3117     }
3118 
    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
3122     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3123 
3124     KnownDeadEnds.set_union(ToBeExploredFrom);
3125     for (const Instruction *DeadEndI : KnownDeadEnds) {
3126       auto *CB = dyn_cast<CallBase>(DeadEndI);
3127       if (!CB)
3128         continue;
3129       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3130           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3131       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3132       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3133         continue;
3134 
3135       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3136         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3137       else
3138         A.changeToUnreachableAfterManifest(
3139             const_cast<Instruction *>(DeadEndI->getNextNode()));
3140       HasChanged = ChangeStatus::CHANGED;
3141     }
3142 
3143     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3144     for (BasicBlock &BB : F)
3145       if (!AssumedLiveBlocks.count(&BB)) {
3146         A.deleteAfterManifest(BB);
3147         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3148       }
3149 
3150     return HasChanged;
3151   }
3152 
3153   /// See AbstractAttribute::updateImpl(...).
3154   ChangeStatus updateImpl(Attributor &A) override;
3155 
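  /// See AAIsDead::isEdgeDead(...). An edge is dead unless it was recorded as
  /// assumed live during exploration.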
3156   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3157     return !AssumedLiveEdges.count(std::make_pair(From, To));
3158   }
3159 
3160   /// See AbstractAttribute::trackStatistics()
3161   void trackStatistics() const override {}
3162 
3163   /// Returns true if the function is assumed dead.
3164   bool isAssumedDead() const override { return false; }
3165 
3166   /// See AAIsDead::isKnownDead().
3167   bool isKnownDead() const override { return false; }
3168 
3169   /// See AAIsDead::isAssumedDead(BasicBlock *).
3170   bool isAssumedDead(const BasicBlock *BB) const override {
3171     assert(BB->getParent() == getAnchorScope() &&
3172            "BB must be in the same anchor scope function.");
3173 
3174     if (!getAssumed())
3175       return false;
3176     return !AssumedLiveBlocks.count(BB);
3177   }
3178 
3179   /// See AAIsDead::isKnownDead(BasicBlock *).
3180   bool isKnownDead(const BasicBlock *BB) const override {
3181     return getKnown() && isAssumedDead(BB);
3182   }
3183 
3184   /// See AAIsDead::isAssumed(Instruction *I).
3185   bool isAssumedDead(const Instruction *I) const override {
3186     assert(I->getParent()->getParent() == getAnchorScope() &&
3187            "Instruction must be in the same anchor scope function.");
3188 
3189     if (!getAssumed())
3190       return false;
3191 
    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be dead if it comes after a noreturn call in a
    // live block.
3194     if (!AssumedLiveBlocks.count(I->getParent()))
3195       return true;
3196 
3197     // If it is not after a liveness barrier it is live.
3198     const Instruction *PrevI = I->getPrevNode();
3199     while (PrevI) {
3200       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3201         return true;
3202       PrevI = PrevI->getPrevNode();
3203     }
3204     return false;
3205   }
3206 
3207   /// See AAIsDead::isKnownDead(Instruction *I).
3208   bool isKnownDead(const Instruction *I) const override {
3209     return getKnown() && isAssumedDead(I);
3210   }
3211 
  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
3214   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3215     if (!AssumedLiveBlocks.insert(&BB).second)
3216       return false;
3217 
3218     // We assume that all of BB is (probably) live now and if there are calls to
3219     // internal functions we will assume that those are now live as well. This
3220     // is a performance optimization for blocks with calls to a lot of internal
3221     // functions. It can however cause dead functions to be treated as live.
3222     for (const Instruction &I : BB)
3223       if (const auto *CB = dyn_cast<CallBase>(&I))
3224         if (const Function *F = CB->getCalledFunction())
3225           if (F->hasLocalLinkage())
3226             A.markLiveInternalFunction(*F);
3227     return true;
3228   }
3229 
3230   /// Collection of instructions that need to be explored again, e.g., we
3231   /// did assume they do not transfer control to (one of their) successors.
3232   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3233 
3234   /// Collection of instructions that are known to not transfer control.
3235   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3236 
3237   /// Collection of all assumed live edges
3238   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3239 
3240   /// Collection of all assumed live BasicBlocks.
3241   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3242 };
3243 
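/// Identify the instructions that may be executed after \p CB and append them
/// to \p AliveSuccessors. The return value indicates whether assumed (rather
/// than known) information was used, i.e., whether the result may still change
/// in a later update.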
3244 static bool
3245 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3246                         AbstractAttribute &AA,
3247                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3248   const IRPosition &IPos = IRPosition::callsite_function(CB);
3249 
3250   const auto &NoReturnAA =
3251       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3252   if (NoReturnAA.isAssumedNoReturn())
3253     return !NoReturnAA.isKnownNoReturn();
3254   if (CB.isTerminator())
3255     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3256   else
3257     AliveSuccessors.push_back(CB.getNextNode());
3258   return false;
3259 }
3260 
3261 static bool
3262 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3263                         AbstractAttribute &AA,
3264                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3265   bool UsedAssumedInformation =
3266       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3267 
  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
3271   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3272     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3273   } else {
3274     const IRPosition &IPos = IRPosition::callsite_function(II);
3275     const auto &AANoUnw =
3276         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3277     if (AANoUnw.isAssumedNoUnwind()) {
3278       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3279     } else {
3280       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3281     }
3282   }
3283   return UsedAssumedInformation;
3284 }
3285 
3286 static bool
3287 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3288                         AbstractAttribute &AA,
3289                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3290   bool UsedAssumedInformation = false;
3291   if (BI.getNumSuccessors() == 1) {
3292     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3293   } else {
3294     Optional<Constant *> C =
3295         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3296     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3297       // No value yet, assume both edges are dead.
3298     } else if (isa_and_nonnull<ConstantInt>(*C)) {
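      // A constant condition selects exactly one live successor, e.g., an
      // assumed "i1 true" (value 1) keeps only successor 0 alive.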
3299       const BasicBlock *SuccBB =
3300           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3301       AliveSuccessors.push_back(&SuccBB->front());
3302     } else {
3303       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3304       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3305       UsedAssumedInformation = false;
3306     }
3307   }
3308   return UsedAssumedInformation;
3309 }
3310 
3311 static bool
3312 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3313                         AbstractAttribute &AA,
3314                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3315   bool UsedAssumedInformation = false;
3316   Optional<Constant *> C =
3317       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3318   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3319     // No value yet, assume all edges are dead.
3320   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3321     for (auto &CaseIt : SI.cases()) {
3322       if (CaseIt.getCaseValue() == C.getValue()) {
3323         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3324         return UsedAssumedInformation;
3325       }
3326     }
3327     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3328     return UsedAssumedInformation;
3329   } else {
3330     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3331       AliveSuccessors.push_back(&SuccBB->front());
3332   }
3333   return UsedAssumedInformation;
3334 }
3335 
3336 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3337   ChangeStatus Change = ChangeStatus::UNCHANGED;
3338 
3339   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3340                     << getAnchorScope()->size() << "] BBs and "
3341                     << ToBeExploredFrom.size() << " exploration points and "
3342                     << KnownDeadEnds.size() << " known dead ends\n");
3343 
3344   // Copy and clear the list of instructions we need to explore from. It is
3345   // refilled with instructions the next update has to look at.
3346   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3347                                                ToBeExploredFrom.end());
3348   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3349 
3350   SmallVector<const Instruction *, 8> AliveSuccessors;
3351   while (!Worklist.empty()) {
3352     const Instruction *I = Worklist.pop_back_val();
3353     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3354 
3355     // Fast forward for uninteresting instructions. We could look for UB here
3356     // though.
3357     while (!I->isTerminator() && !isa<CallBase>(I)) {
3358       Change = ChangeStatus::CHANGED;
3359       I = I->getNextNode();
3360     }
3361 
3362     AliveSuccessors.clear();
3363 
3364     bool UsedAssumedInformation = false;
3365     switch (I->getOpcode()) {
3366     // TODO: look for (assumed) UB to backwards propagate "deadness".
3367     default:
3368       assert(I->isTerminator() &&
3369              "Expected non-terminators to be handled already!");
3370       for (const BasicBlock *SuccBB : successors(I->getParent()))
3371         AliveSuccessors.push_back(&SuccBB->front());
3372       break;
3373     case Instruction::Call:
3374       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3375                                                        *this, AliveSuccessors);
3376       break;
3377     case Instruction::Invoke:
3378       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3379                                                        *this, AliveSuccessors);
3380       break;
3381     case Instruction::Br:
3382       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3383                                                        *this, AliveSuccessors);
3384       break;
3385     case Instruction::Switch:
3386       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3387                                                        *this, AliveSuccessors);
3388       break;
3389     }
3390 
3391     if (UsedAssumedInformation) {
3392       NewToBeExploredFrom.insert(I);
3393     } else {
3394       Change = ChangeStatus::CHANGED;
3395       if (AliveSuccessors.empty() ||
3396           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3397         KnownDeadEnds.insert(I);
3398     }
3399 
3400     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3401                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3402                       << UsedAssumedInformation << "\n");
3403 
3404     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3405       if (!I->isTerminator()) {
3406         assert(AliveSuccessors.size() == 1 &&
3407                "Non-terminator expected to have a single successor!");
3408         Worklist.push_back(AliveSuccessor);
3409       } else {
        // Record the assumed live edge.
3411         AssumedLiveEdges.insert(
3412             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3413         if (assumeLive(A, *AliveSuccessor->getParent()))
3414           Worklist.push_back(AliveSuccessor);
3415       }
3416     }
3417   }
3418 
3419   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3420 
  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have ruled unreachable code dead, and (3) not have discovered any
  // non-trivial dead end.
3427   if (ToBeExploredFrom.empty() &&
3428       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3429       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3430         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3431       }))
3432     return indicatePessimisticFixpoint();
3433   return Change;
3434 }
3435 
/// Liveness information for a call site.
3437 struct AAIsDeadCallSite final : AAIsDeadFunction {
3438   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3439       : AAIsDeadFunction(IRP, A) {}
3440 
3441   /// See AbstractAttribute::initialize(...).
3442   void initialize(Attributor &A) override {
3443     // TODO: Once we have call site specific value information we can provide
3444     //       call site specific liveness information and then it makes
3445     //       sense to specialize attributes for call sites instead of
3446     //       redirecting requests to the callee.
3447     llvm_unreachable("Abstract attributes for liveness are not "
3448                      "supported for call sites yet!");
3449   }
3450 
3451   /// See AbstractAttribute::updateImpl(...).
3452   ChangeStatus updateImpl(Attributor &A) override {
3453     return indicatePessimisticFixpoint();
3454   }
3455 
3456   /// See AbstractAttribute::trackStatistics()
3457   void trackStatistics() const override {}
3458 };
3459 
3460 /// -------------------- Dereferenceable Argument Attribute --------------------
3461 
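/// Clamp both parts of the dereferenceability state, i.e., the known/assumed
/// dereferenceable bytes and the "global" qualifier, and merge the resulting
/// change statuses.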
3462 template <>
3463 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3464                                                      const DerefState &R) {
3465   ChangeStatus CS0 =
3466       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3467   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3468   return CS0 | CS1;
3469 }
3470 
3471 struct AADereferenceableImpl : AADereferenceable {
3472   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3473       : AADereferenceable(IRP, A) {}
3474   using StateType = DerefState;
3475 
3476   /// See AbstractAttribute::initialize(...).
3477   void initialize(Attributor &A) override {
3478     SmallVector<Attribute, 4> Attrs;
3479     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3480              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3481     for (const Attribute &Attr : Attrs)
3482       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3483 
3484     const IRPosition &IRP = this->getIRPosition();
3485     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3486 
3487     bool CanBeNull, CanBeFreed;
3488     takeKnownDerefBytesMaximum(
3489         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3490             A.getDataLayout(), CanBeNull, CanBeFreed));
3491 
3492     bool IsFnInterface = IRP.isFnInterfaceKind();
3493     Function *FnScope = IRP.getAnchorScope();
3494     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3495       indicatePessimisticFixpoint();
3496       return;
3497     }
3498 
3499     if (Instruction *CtxI = getCtxI())
3500       followUsesInMBEC(*this, A, getState(), *CtxI);
3501   }
3502 
3503   /// See AbstractAttribute::getState()
3504   /// {
3505   StateType &getState() override { return *this; }
3506   const StateType &getState() const override { return *this; }
3507   /// }
3508 
3509   /// Helper function for collecting accessed bytes in must-be-executed-context
3510   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3511                               DerefState &State) {
3512     const Value *UseV = U->get();
3513     if (!UseV->getType()->isPointerTy())
3514       return;
3515 
3516     Type *PtrTy = UseV->getType();
3517     const DataLayout &DL = A.getDataLayout();
3518     int64_t Offset;
3519     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3520             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3521       if (Base == &getAssociatedValue() &&
3522           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3523         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3524         State.addAccessedBytes(Offset, Size);
3525       }
3526     }
3527   }
3528 
3529   /// See followUsesInMBEC
3530   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3531                        AADereferenceable::StateType &State) {
3532     bool IsNonNull = false;
3533     bool TrackUse = false;
3534     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3535         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3536     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3537                       << " for instruction " << *I << "\n");
3538 
3539     addAccessedBytesForUse(A, U, I, State);
3540     State.takeKnownDerefBytesMaximum(DerefBytes);
3541     return TrackUse;
3542   }
3543 
3544   /// See AbstractAttribute::manifest(...).
3545   ChangeStatus manifest(Attributor &A) override {
3546     ChangeStatus Change = AADereferenceable::manifest(A);
3547     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3548       removeAttrs({Attribute::DereferenceableOrNull});
3549       return ChangeStatus::CHANGED;
3550     }
3551     return Change;
3552   }
3553 
3554   void getDeducedAttributes(LLVMContext &Ctx,
3555                             SmallVectorImpl<Attribute> &Attrs) const override {
3556     // TODO: Add *_globally support
3557     if (isAssumedNonNull())
3558       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3559           Ctx, getAssumedDereferenceableBytes()));
3560     else
3561       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3562           Ctx, getAssumedDereferenceableBytes()));
3563   }
3564 
3565   /// See AbstractAttribute::getAsStr().
3566   const std::string getAsStr() const override {
3567     if (!getAssumedDereferenceableBytes())
3568       return "unknown-dereferenceable";
3569     return std::string("dereferenceable") +
3570            (isAssumedNonNull() ? "" : "_or_null") +
3571            (isAssumedGlobal() ? "_globally" : "") + "<" +
3572            std::to_string(getKnownDereferenceableBytes()) + "-" +
3573            std::to_string(getAssumedDereferenceableBytes()) + ">";
3574   }
3575 };
3576 
3577 /// Dereferenceable attribute for a floating value.
3578 struct AADereferenceableFloating : AADereferenceableImpl {
3579   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3580       : AADereferenceableImpl(IRP, A) {}
3581 
3582   /// See AbstractAttribute::updateImpl(...).
3583   ChangeStatus updateImpl(Attributor &A) override {
3584     const DataLayout &DL = A.getDataLayout();
3585 
3586     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3587                             bool Stripped) -> bool {
3588       unsigned IdxWidth =
3589           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3590       APInt Offset(IdxWidth, 0);
3591       const Value *Base =
3592           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3593 
3594       const auto &AA = A.getAAFor<AADereferenceable>(
3595           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3596       int64_t DerefBytes = 0;
3597       if (!Stripped && this == &AA) {
3598         // Use IR information if we did not strip anything.
3599         // TODO: track globally.
3600         bool CanBeNull, CanBeFreed;
3601         DerefBytes =
3602             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
3603         T.GlobalState.indicatePessimisticFixpoint();
3604       } else {
3605         const DerefState &DS = AA.getState();
3606         DerefBytes = DS.DerefBytesState.getAssumed();
3607         T.GlobalState &= DS.GlobalState;
3608       }
3609 
      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // with overflows of the dereferenceable bytes.
3613       int64_t OffsetSExt = Offset.getSExtValue();
3614       if (OffsetSExt < 0)
3615         OffsetSExt = 0;
3616 
3617       T.takeAssumedDerefBytesMinimum(
3618           std::max(int64_t(0), DerefBytes - OffsetSExt));
3619 
3620       if (this == &AA) {
3621         if (!Stripped) {
3622           // If nothing was stripped IR information is all we got.
3623           T.takeKnownDerefBytesMaximum(
3624               std::max(int64_t(0), DerefBytes - OffsetSExt));
3625           T.indicatePessimisticFixpoint();
3626         } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which would drive
          // them down to the known value in a very slow way that we can
          // short-circuit by fixing the state here.
3632           T.indicatePessimisticFixpoint();
3633         }
3634       }
3635 
3636       return T.isValidState();
3637     };
3638 
3639     DerefState T;
3640     if (!genericValueTraversal<AADereferenceable, DerefState>(
3641             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3642       return indicatePessimisticFixpoint();
3643 
3644     return clampStateAndIndicateChange(getState(), T);
3645   }
3646 
3647   /// See AbstractAttribute::trackStatistics()
3648   void trackStatistics() const override {
3649     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3650   }
3651 };
3652 
3653 /// Dereferenceable attribute for a return value.
3654 struct AADereferenceableReturned final
3655     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3656   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3657       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3658             IRP, A) {}
3659 
3660   /// See AbstractAttribute::trackStatistics()
3661   void trackStatistics() const override {
3662     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3663   }
3664 };
3665 
/// Dereferenceable attribute for an argument.
3667 struct AADereferenceableArgument final
3668     : AAArgumentFromCallSiteArguments<AADereferenceable,
3669                                       AADereferenceableImpl> {
3670   using Base =
3671       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3672   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3673       : Base(IRP, A) {}
3674 
3675   /// See AbstractAttribute::trackStatistics()
3676   void trackStatistics() const override {
3677     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3678   }
3679 };
3680 
3681 /// Dereferenceable attribute for a call site argument.
3682 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3683   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3684       : AADereferenceableFloating(IRP, A) {}
3685 
3686   /// See AbstractAttribute::trackStatistics()
3687   void trackStatistics() const override {
3688     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3689   }
3690 };
3691 
3692 /// Dereferenceable attribute deduction for a call site return value.
3693 struct AADereferenceableCallSiteReturned final
3694     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3695   using Base =
3696       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3697   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3698       : Base(IRP, A) {}
3699 
3700   /// See AbstractAttribute::trackStatistics()
3701   void trackStatistics() const override {
3702     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3703   }
3704 };
3705 
3706 // ------------------------ Align Argument Attribute ------------------------
3707 
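/// Determine the alignment of \p AssociatedValue implied by its use \p U in
/// the instruction \p I. \p TrackUse is set if the users of \p I should be
/// visited as well. Returns 0 if the use does not imply an alignment better
/// than the one \p QueryingAA already knows.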
3708 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3709                                     Value &AssociatedValue, const Use *U,
3710                                     const Instruction *I, bool &TrackUse) {
3711   // We need to follow common pointer manipulation uses to the accesses they
3712   // feed into.
3713   if (isa<CastInst>(I)) {
3714     // Follow all but ptr2int casts.
3715     TrackUse = !isa<PtrToIntInst>(I);
3716     return 0;
3717   }
3718   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3719     if (GEP->hasAllConstantIndices())
3720       TrackUse = true;
3721     return 0;
3722   }
3723 
3724   MaybeAlign MA;
3725   if (const auto *CB = dyn_cast<CallBase>(I)) {
3726     if (CB->isBundleOperand(U) || CB->isCallee(U))
3727       return 0;
3728 
3729     unsigned ArgNo = CB->getArgOperandNo(U);
3730     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3731     // As long as we only use known information there is no need to track
3732     // dependences here.
3733     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3734     MA = MaybeAlign(AlignAA.getKnownAlign());
3735   }
3736 
3737   const DataLayout &DL = A.getDataLayout();
3738   const Value *UseV = U->get();
3739   if (auto *SI = dyn_cast<StoreInst>(I)) {
3740     if (SI->getPointerOperand() == UseV)
3741       MA = SI->getAlign();
3742   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3743     if (LI->getPointerOperand() == UseV)
3744       MA = LI->getAlign();
3745   }
3746 
3747   if (!MA || *MA <= QueryingAA.getKnownAlign())
3748     return 0;
3749 
3750   unsigned Alignment = MA->value();
3751   int64_t Offset;
3752 
3753   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3754     if (Base == &AssociatedValue) {
3755       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3756       // So we can say that the maximum power of two which is a divisor of
3757       // gcd(Offset, Alignment) is an alignment.
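      // E.g., an 8-byte aligned access at offset 12 from the base proves the
      // base itself is at least gcd(12, 8) = 4 bytes aligned.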
3758 
3759       uint32_t gcd =
3760           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3761       Alignment = llvm::PowerOf2Floor(gcd);
3762     }
3763   }
3764 
3765   return Alignment;
3766 }
3767 
3768 struct AAAlignImpl : AAAlign {
3769   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3770 
3771   /// See AbstractAttribute::initialize(...).
3772   void initialize(Attributor &A) override {
3773     SmallVector<Attribute, 4> Attrs;
3774     getAttrs({Attribute::Alignment}, Attrs);
3775     for (const Attribute &Attr : Attrs)
3776       takeKnownMaximum(Attr.getValueAsInt());
3777 
3778     Value &V = getAssociatedValue();
    // TODO: This is a HACK that keeps getPointerAlignment from introducing a
    //       ptr2int use of the function pointer. This was caused by D73131. We
    //       want to avoid this for function pointers especially because we
    //       iterate their uses and int2ptr is not handled. It is not a
    //       correctness problem though!
3784     if (!V.getType()->getPointerElementType()->isFunctionTy())
3785       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3786 
3787     if (getIRPosition().isFnInterfaceKind() &&
3788         (!getAnchorScope() ||
3789          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3790       indicatePessimisticFixpoint();
3791       return;
3792     }
3793 
3794     if (Instruction *CtxI = getCtxI())
3795       followUsesInMBEC(*this, A, getState(), *CtxI);
3796   }
3797 
3798   /// See AbstractAttribute::manifest(...).
3799   ChangeStatus manifest(Attributor &A) override {
3800     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3801 
3802     // Check for users that allow alignment annotations.
3803     Value &AssociatedValue = getAssociatedValue();
3804     for (const Use &U : AssociatedValue.uses()) {
3805       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3806         if (SI->getPointerOperand() == &AssociatedValue)
3807           if (SI->getAlignment() < getAssumedAlign()) {
3808             STATS_DECLTRACK(AAAlign, Store,
3809                             "Number of times alignment added to a store");
3810             SI->setAlignment(Align(getAssumedAlign()));
3811             LoadStoreChanged = ChangeStatus::CHANGED;
3812           }
3813       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3814         if (LI->getPointerOperand() == &AssociatedValue)
3815           if (LI->getAlignment() < getAssumedAlign()) {
3816             LI->setAlignment(Align(getAssumedAlign()));
3817             STATS_DECLTRACK(AAAlign, Load,
3818                             "Number of times alignment added to a load");
3819             LoadStoreChanged = ChangeStatus::CHANGED;
3820           }
3821       }
3822     }
3823 
3824     ChangeStatus Changed = AAAlign::manifest(A);
3825 
3826     Align InheritAlign =
3827         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3828     if (InheritAlign >= getAssumedAlign())
3829       return LoadStoreChanged;
3830     return Changed | LoadStoreChanged;
3831   }
3832 
3833   // TODO: Provide a helper to determine the implied ABI alignment and check in
3834   //       the existing manifest method and a new one for AAAlignImpl that value
3835   //       to avoid making the alignment explicit if it did not improve.
3836 
  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
3841     if (getAssumedAlign() > 1)
3842       Attrs.emplace_back(
3843           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3844   }
3845 
3846   /// See followUsesInMBEC
3847   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3848                        AAAlign::StateType &State) {
3849     bool TrackUse = false;
3850 
3851     unsigned int KnownAlign =
3852         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3853     State.takeKnownMaximum(KnownAlign);
3854 
3855     return TrackUse;
3856   }
3857 
3858   /// See AbstractAttribute::getAsStr().
3859   const std::string getAsStr() const override {
3860     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3861                                 "-" + std::to_string(getAssumedAlign()) + ">")
3862                              : "unknown-align";
3863   }
3864 };
3865 
3866 /// Align attribute for a floating value.
3867 struct AAAlignFloating : AAAlignImpl {
3868   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3869 
3870   /// See AbstractAttribute::updateImpl(...).
3871   ChangeStatus updateImpl(Attributor &A) override {
3872     const DataLayout &DL = A.getDataLayout();
3873 
3874     auto VisitValueCB = [&](Value &V, const Instruction *,
3875                             AAAlign::StateType &T, bool Stripped) -> bool {
3876       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3877                                            DepClassTy::REQUIRED);
3878       if (!Stripped && this == &AA) {
3879         int64_t Offset;
3880         unsigned Alignment = 1;
3881         if (const Value *Base =
3882                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3883           Align PA = Base->getPointerAlignment(DL);
3884           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3885           // So we can say that the maximum power of two which is a divisor of
3886           // gcd(Offset, Alignment) is an alignment.
3887 
3888           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3889                                                uint32_t(PA.value()));
3890           Alignment = llvm::PowerOf2Floor(gcd);
3891         } else {
3892           Alignment = V.getPointerAlignment(DL).value();
3893         }
3894         // Use only IR information if we did not strip anything.
3895         T.takeKnownMaximum(Alignment);
3896         T.indicatePessimisticFixpoint();
3897       } else {
3898         // Use abstract attribute information.
3899         const AAAlign::StateType &DS = AA.getState();
3900         T ^= DS;
3901       }
3902       return T.isValidState();
3903     };
3904 
3905     StateType T;
3906     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3907                                                    VisitValueCB, getCtxI()))
3908       return indicatePessimisticFixpoint();
3909 
    // TODO: If we know we visited all incoming values, and thus none are
    //       assumed dead, we can take the known information from the state T.
3912     return clampStateAndIndicateChange(getState(), T);
3913   }
3914 
3915   /// See AbstractAttribute::trackStatistics()
3916   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3917 };
3918 
3919 /// Align attribute for function return value.
3920 struct AAAlignReturned final
3921     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3922   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3923   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3924 
3925   /// See AbstractAttribute::initialize(...).
3926   void initialize(Attributor &A) override {
3927     Base::initialize(A);
3928     Function *F = getAssociatedFunction();
3929     if (!F || F->isDeclaration())
3930       indicatePessimisticFixpoint();
3931   }
3932 
3933   /// See AbstractAttribute::trackStatistics()
3934   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3935 };
3936 
3937 /// Align attribute for function argument.
3938 struct AAAlignArgument final
3939     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3940   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3941   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3942 
3943   /// See AbstractAttribute::manifest(...).
3944   ChangeStatus manifest(Attributor &A) override {
    // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in sync. It just does not seem worth the trouble right now.
3948     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3949       return ChangeStatus::UNCHANGED;
3950     return Base::manifest(A);
3951   }
3952 
3953   /// See AbstractAttribute::trackStatistics()
3954   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3955 };
3956 
3957 struct AAAlignCallSiteArgument final : AAAlignFloating {
3958   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3959       : AAAlignFloating(IRP, A) {}
3960 
3961   /// See AbstractAttribute::manifest(...).
3962   ChangeStatus manifest(Attributor &A) override {
    // If the associated argument is involved in a must-tail call we give up
    // because we would need to keep the argument alignments of caller and
    // callee in sync. It just does not seem worth the trouble right now.
3966     if (Argument *Arg = getAssociatedArgument())
3967       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3968         return ChangeStatus::UNCHANGED;
3969     ChangeStatus Changed = AAAlignImpl::manifest(A);
3970     Align InheritAlign =
3971         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3972     if (InheritAlign >= getAssumedAlign())
3973       Changed = ChangeStatus::UNCHANGED;
3974     return Changed;
3975   }
3976 
3977   /// See AbstractAttribute::updateImpl(Attributor &A).
3978   ChangeStatus updateImpl(Attributor &A) override {
3979     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3980     if (Argument *Arg = getAssociatedArgument()) {
3981       // We only take known information from the argument
3982       // so we do not need to track a dependence.
3983       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3984           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
3985       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3986     }
3987     return Changed;
3988   }
3989 
3990   /// See AbstractAttribute::trackStatistics()
3991   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3992 };
3993 
3994 /// Align attribute deduction for a call site return value.
3995 struct AAAlignCallSiteReturned final
3996     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3997   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3998   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3999       : Base(IRP, A) {}
4000 
4001   /// See AbstractAttribute::initialize(...).
4002   void initialize(Attributor &A) override {
4003     Base::initialize(A);
4004     Function *F = getAssociatedFunction();
4005     if (!F || F->isDeclaration())
4006       indicatePessimisticFixpoint();
4007   }
4008 
4009   /// See AbstractAttribute::trackStatistics()
4010   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4011 };
4012 
4013 /// ------------------ Function No-Return Attribute ----------------------------
4014 struct AANoReturnImpl : public AANoReturn {
4015   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4016 
4017   /// See AbstractAttribute::initialize(...).
4018   void initialize(Attributor &A) override {
4019     AANoReturn::initialize(A);
4020     Function *F = getAssociatedFunction();
4021     if (!F || F->isDeclaration())
4022       indicatePessimisticFixpoint();
4023   }
4024 
4025   /// See AbstractAttribute::getAsStr().
4026   const std::string getAsStr() const override {
4027     return getAssumed() ? "noreturn" : "may-return";
4028   }
4029 
4030   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
4032     auto CheckForNoReturn = [](Instruction &) { return false; };
4033     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4034                                    {(unsigned)Instruction::Ret}))
4035       return indicatePessimisticFixpoint();
4036     return ChangeStatus::UNCHANGED;
4037   }
4038 };
4039 
4040 struct AANoReturnFunction final : AANoReturnImpl {
4041   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4042       : AANoReturnImpl(IRP, A) {}
4043 
4044   /// See AbstractAttribute::trackStatistics()
4045   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4046 };
4047 
/// NoReturn attribute deduction for call sites.
4049 struct AANoReturnCallSite final : AANoReturnImpl {
4050   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4051       : AANoReturnImpl(IRP, A) {}
4052 
4053   /// See AbstractAttribute::initialize(...).
4054   void initialize(Attributor &A) override {
4055     AANoReturnImpl::initialize(A);
4056     if (Function *F = getAssociatedFunction()) {
4057       const IRPosition &FnPos = IRPosition::function(*F);
4058       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4059       if (!FnAA.isAssumedNoReturn())
4060         indicatePessimisticFixpoint();
4061     }
4062   }
4063 
4064   /// See AbstractAttribute::updateImpl(...).
4065   ChangeStatus updateImpl(Attributor &A) override {
4066     // TODO: Once we have call site specific value information we can provide
4067     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
4069     //       redirecting requests to the callee argument.
4070     Function *F = getAssociatedFunction();
4071     const IRPosition &FnPos = IRPosition::function(*F);
4072     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4073     return clampStateAndIndicateChange(getState(), FnAA.getState());
4074   }
4075 
4076   /// See AbstractAttribute::trackStatistics()
4077   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4078 };
4079 
4080 /// ----------------------- Variable Capturing ---------------------------------
4081 
/// A class to hold the state for no-capture attributes.
4083 struct AANoCaptureImpl : public AANoCapture {
4084   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4085 
4086   /// See AbstractAttribute::initialize(...).
4087   void initialize(Attributor &A) override {
4088     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4089       indicateOptimisticFixpoint();
4090       return;
4091     }
4092     Function *AnchorScope = getAnchorScope();
4093     if (isFnInterfaceKind() &&
4094         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4095       indicatePessimisticFixpoint();
4096       return;
4097     }
4098 
4099     // You cannot "capture" null in the default address space.
4100     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4101         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4102       indicateOptimisticFixpoint();
4103       return;
4104     }
4105 
4106     const Function *F =
4107         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4108 
4109     // Check what state the associated function can actually capture.
4110     if (F)
4111       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4112     else
4113       indicatePessimisticFixpoint();
4114   }
4115 
4116   /// See AbstractAttribute::updateImpl(...).
4117   ChangeStatus updateImpl(Attributor &A) override;
4118 
  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
4123     if (!isAssumedNoCaptureMaybeReturned())
4124       return;
4125 
4126     if (isArgumentPosition()) {
4127       if (isAssumedNoCapture())
4128         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4129       else if (ManifestInternal)
4130         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4131     }
4132   }
4133 
4134   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4135   /// depending on the ability of the function associated with \p IRP to capture
4136   /// state in memory and through "returning/throwing", respectively.
4137   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4138                                                    const Function &F,
4139                                                    BitIntegerState &State) {
4140     // TODO: Once we have memory behavior attributes we should use them here.
4141 
4142     // If we know we cannot communicate or write to memory, we do not care about
4143     // ptr2int anymore.
4144     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4145         F.getReturnType()->isVoidTy()) {
4146       State.addKnownBits(NO_CAPTURE);
4147       return;
4148     }
4149 
4150     // A function cannot capture state in memory if it only reads memory, it can
4151     // however return/throw state and the state might be influenced by the
4152     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4153     if (F.onlyReadsMemory())
4154       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4155 
    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
4158     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4159       State.addKnownBits(NOT_CAPTURED_IN_RET);
4160 
4161     // Check existing "returned" attributes.
4162     int ArgNo = IRP.getCalleeArgNo();
4163     if (F.doesNotThrow() && ArgNo >= 0) {
4164       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4165         if (F.hasParamAttribute(u, Attribute::Returned)) {
4166           if (u == unsigned(ArgNo))
4167             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4168           else if (F.onlyReadsMemory())
4169             State.addKnownBits(NO_CAPTURE);
4170           else
4171             State.addKnownBits(NOT_CAPTURED_IN_RET);
4172           break;
4173         }
4174     }
4175   }
4176 
  /// See AbstractAttribute::getAsStr().
4178   const std::string getAsStr() const override {
4179     if (isKnownNoCapture())
4180       return "known not-captured";
4181     if (isAssumedNoCapture())
4182       return "assumed not-captured";
4183     if (isKnownNoCaptureMaybeReturned())
4184       return "known not-captured-maybe-returned";
4185     if (isAssumedNoCaptureMaybeReturned())
4186       return "assumed not-captured-maybe-returned";
4187     return "assumed-captured";
4188   }
4189 };
4190 
4191 /// Attributor-aware capture tracker.
4192 struct AACaptureUseTracker final : public CaptureTracker {
4193 
  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, the corresponding assumed bits
  /// (AANoCapture::NOT_CAPTURED_IN_MEM/_INT/_RET) are removed from \p State
  /// and the search is stopped. If a use leads to a return instruction, only
  /// the NOT_CAPTURED_IN_RET bit is removed; uses of a ptr2int are followed
  /// transitively. If a use is found that is currently assumed
  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
  /// set. All values in \p PotentialCopies are later tracked as well. For
  /// every explored use we decrement \p RemainingUsesToExplore. Once it
  /// reaches 0, the search is stopped with all capture bits conservatively
  /// removed.
4207   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4208                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4209                       SmallVectorImpl<const Value *> &PotentialCopies,
4210                       unsigned &RemainingUsesToExplore)
4211       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4212         PotentialCopies(PotentialCopies),
4213         RemainingUsesToExplore(RemainingUsesToExplore) {}
4214 
  /// Determine if \p V may be captured. *Also updates the state!*
4216   bool valueMayBeCaptured(const Value *V) {
4217     if (V->getType()->isPointerTy()) {
4218       PointerMayBeCaptured(V, this);
4219     } else {
4220       State.indicatePessimisticFixpoint();
4221     }
4222     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4223   }
4224 
4225   /// See CaptureTracker::tooManyUses().
4226   void tooManyUses() override {
4227     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4228   }
4229 
4230   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4231     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4232       return true;
4233     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4234         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4235     return DerefAA.getAssumedDereferenceableBytes();
4236   }
4237 
4238   /// See CaptureTracker::captured(...).
4239   bool captured(const Use *U) override {
4240     Instruction *UInst = cast<Instruction>(U->getUser());
4241     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4242                       << "\n");
4243 
4244     // Because we may reuse the tracker multiple times we keep track of the
4245     // number of explored uses ourselves as well.
4246     if (RemainingUsesToExplore-- == 0) {
4247       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4248       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4249                           /* Return */ true);
4250     }
4251 
4252     // Deal with ptr2int by following uses.
4253     if (isa<PtrToIntInst>(UInst)) {
4254       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4255       return valueMayBeCaptured(UInst);
4256     }
4257 
4258     // Explicitly catch return instructions.
4259     if (isa<ReturnInst>(UInst))
4260       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4261                           /* Return */ true);
4262 
4263     // For now we only use special logic for call sites. However, the tracker
4264     // itself knows about a lot of other non-capturing cases already.
4265     auto *CB = dyn_cast<CallBase>(UInst);
4266     if (!CB || !CB->isArgOperand(U))
4267       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4268                           /* Return */ true);
4269 
4270     unsigned ArgNo = CB->getArgOperandNo(U);
4271     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
4274     auto &ArgNoCaptureAA =
4275         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4276     if (ArgNoCaptureAA.isAssumedNoCapture())
4277       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4278                           /* Return */ false);
4279     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4280       addPotentialCopy(*CB);
4281       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4282                           /* Return */ false);
4283     }
4284 
    // Lastly, we could not find a reason no-capture can be assumed, so we
    // conservatively do not assume it.
4286     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4287                         /* Return */ true);
4288   }
4289 
4290   /// Register \p CS as potential copy of the value we are checking.
4291   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4292 
4293   /// See CaptureTracker::shouldExplore(...).
4294   bool shouldExplore(const Use *U) override {
4295     // Check liveness and ignore droppable users.
4296     return !U->getUser()->isDroppable() &&
4297            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4298   }
4299 
4300   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4301   /// \p CapturedInRet, then return the appropriate value for use in the
4302   /// CaptureTracker::captured() interface.
4303   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4304                     bool CapturedInRet) {
4305     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4306                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4307     if (CapturedInMem)
4308       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4309     if (CapturedInInt)
4310       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4311     if (CapturedInRet)
4312       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4313     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4314   }
4315 
4316 private:
4317   /// The attributor providing in-flight abstract attributes.
4318   Attributor &A;
4319 
4320   /// The abstract attribute currently updated.
4321   AANoCapture &NoCaptureAA;
4322 
4323   /// The abstract liveness state.
4324   const AAIsDead &IsDeadAA;
4325 
4326   /// The state currently updated.
4327   AANoCapture::StateType &State;
4328 
4329   /// Set of potential copies of the tracked value.
4330   SmallVectorImpl<const Value *> &PotentialCopies;
4331 
4332   /// Global counter to limit the number of explored uses.
4333   unsigned &RemainingUsesToExplore;
4334 };
4335 
4336 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4337   const IRPosition &IRP = getIRPosition();
4338   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4339                                         : &IRP.getAssociatedValue();
4340   if (!V)
4341     return indicatePessimisticFixpoint();
4342 
4343   const Function *F =
4344       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4345   assert(F && "Expected a function!");
4346   const IRPosition &FnPos = IRPosition::function(*F);
4347   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4348 
4349   AANoCapture::StateType T;
4350 
4351   // Readonly means we cannot capture through memory.
4352   const auto &FnMemAA =
4353       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4354   if (FnMemAA.isAssumedReadOnly()) {
4355     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4356     if (FnMemAA.isKnownReadOnly())
4357       addKnownBits(NOT_CAPTURED_IN_MEM);
4358     else
4359       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4360   }
4361 
  // Make sure all returned values are different from the underlying value.
4363   // TODO: we could do this in a more sophisticated way inside
4364   //       AAReturnedValues, e.g., track all values that escape through returns
4365   //       directly somehow.
4366   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4367     bool SeenConstant = false;
4368     for (auto &It : RVAA.returned_values()) {
4369       if (isa<Constant>(It.first)) {
4370         if (SeenConstant)
4371           return false;
4372         SeenConstant = true;
4373       } else if (!isa<Argument>(It.first) ||
4374                  It.first == getAssociatedArgument())
4375         return false;
4376     }
4377     return true;
4378   };
4379 
4380   const auto &NoUnwindAA =
4381       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4382   if (NoUnwindAA.isAssumedNoUnwind()) {
4383     bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 DepClassTy::OPTIONAL);
4389     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4390       T.addKnownBits(NOT_CAPTURED_IN_RET);
4391       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4392         return ChangeStatus::UNCHANGED;
4393       if (NoUnwindAA.isKnownNoUnwind() &&
4394           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4395         addKnownBits(NOT_CAPTURED_IN_RET);
4396         if (isKnown(NOT_CAPTURED_IN_MEM))
4397           return indicateOptimisticFixpoint();
4398       }
4399     }
4400   }
4401 
  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly update the assumed state.
4405   SmallVector<const Value *, 4> PotentialCopies;
4406   unsigned RemainingUsesToExplore =
4407       getDefaultMaxUsesToExploreForCaptureTracking();
4408   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4409                               RemainingUsesToExplore);
4410 
4411   // Check all potential copies of the associated value until we can assume
4412   // none will be captured or we have to assume at least one might be.
4413   unsigned Idx = 0;
4414   PotentialCopies.push_back(V);
4415   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4416     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4417 
4418   AANoCapture::StateType &S = getState();
4419   auto Assumed = S.getAssumed();
4420   S.intersectAssumedBits(T.getAssumed());
4421   if (!isAssumedNoCaptureMaybeReturned())
4422     return indicatePessimisticFixpoint();
4423   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4424                                    : ChangeStatus::CHANGED;
4425 }
4426 
4427 /// NoCapture attribute for function arguments.
4428 struct AANoCaptureArgument final : AANoCaptureImpl {
4429   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4430       : AANoCaptureImpl(IRP, A) {}
4431 
4432   /// See AbstractAttribute::trackStatistics()
4433   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4434 };
4435 
4436 /// NoCapture attribute for call site arguments.
4437 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4438   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4439       : AANoCaptureImpl(IRP, A) {}
4440 
4441   /// See AbstractAttribute::initialize(...).
4442   void initialize(Attributor &A) override {
4443     if (Argument *Arg = getAssociatedArgument())
4444       if (Arg->hasByValAttr())
4445         indicateOptimisticFixpoint();
4446     AANoCaptureImpl::initialize(A);
4447   }
4448 
4449   /// See AbstractAttribute::updateImpl(...).
4450   ChangeStatus updateImpl(Attributor &A) override {
4451     // TODO: Once we have call site specific value information we can provide
4452     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
4454     //       redirecting requests to the callee argument.
4455     Argument *Arg = getAssociatedArgument();
4456     if (!Arg)
4457       return indicatePessimisticFixpoint();
4458     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4459     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4460     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4461   }
4462 
4463   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nocapture)
  }
4465 };
4466 
4467 /// NoCapture attribute for floating values.
4468 struct AANoCaptureFloating final : AANoCaptureImpl {
4469   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4470       : AANoCaptureImpl(IRP, A) {}
4471 
4472   /// See AbstractAttribute::trackStatistics()
4473   void trackStatistics() const override {
4474     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4475   }
4476 };
4477 
4478 /// NoCapture attribute for function return value.
4479 struct AANoCaptureReturned final : AANoCaptureImpl {
4480   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4481       : AANoCaptureImpl(IRP, A) {
4482     llvm_unreachable("NoCapture is not applicable to function returns!");
4483   }
4484 
4485   /// See AbstractAttribute::initialize(...).
4486   void initialize(Attributor &A) override {
4487     llvm_unreachable("NoCapture is not applicable to function returns!");
4488   }
4489 
4490   /// See AbstractAttribute::updateImpl(...).
4491   ChangeStatus updateImpl(Attributor &A) override {
4492     llvm_unreachable("NoCapture is not applicable to function returns!");
4493   }
4494 
4495   /// See AbstractAttribute::trackStatistics()
4496   void trackStatistics() const override {}
4497 };
4498 
4499 /// NoCapture attribute deduction for a call site return value.
4500 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4501   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4502       : AANoCaptureImpl(IRP, A) {}
4503 
4504   /// See AbstractAttribute::initialize(...).
4505   void initialize(Attributor &A) override {
4506     const Function *F = getAnchorScope();
4507     // Check what state the associated function can actually capture.
4508     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4509   }
4510 
4511   /// See AbstractAttribute::trackStatistics()
4512   void trackStatistics() const override {
4513     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4514   }
4515 };
4516 
4517 /// ------------------ Value Simplify Attribute ----------------------------
4518 struct AAValueSimplifyImpl : AAValueSimplify {
4519   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4520       : AAValueSimplify(IRP, A) {}
4521 
4522   /// See AbstractAttribute::initialize(...).
4523   void initialize(Attributor &A) override {
4524     if (getAssociatedValue().getType()->isVoidTy())
4525       indicatePessimisticFixpoint();
4526   }
4527 
4528   /// See AbstractAttribute::getAsStr().
4529   const std::string getAsStr() const override {
4530     LLVM_DEBUG({
4531       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
4532       if (SimplifiedAssociatedValue)
4533         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
4534     });
4535     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4536                         : "not-simple";
4537   }
4538 
4539   /// See AbstractAttribute::trackStatistics()
4540   void trackStatistics() const override {}
4541 
4542   /// See AAValueSimplify::getAssumedSimplifiedValue()
4543   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4544     if (!getAssumed())
4545       return const_cast<Value *>(&getAssociatedValue());
4546     return SimplifiedAssociatedValue;
4547   }
4548 
  /// Helper function for querying AAValueSimplify and updating the candidate.
4550   /// \param QueryingValue Value trying to unify with SimplifiedValue
4551   /// \param AccumulatedSimplifiedValue Current simplification result.
4552   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4553                              Value &QueryingValue,
4554                              Optional<Value *> &AccumulatedSimplifiedValue) {
4555     // FIXME: Add a typecast support.
4556 
4557     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4558         QueryingAA,
4559         IRPosition::value(QueryingValue, QueryingAA.getCallBaseContext()),
4560         DepClassTy::REQUIRED);
4561 
4562     Optional<Value *> QueryingValueSimplified =
4563         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4564 
4565     if (!QueryingValueSimplified.hasValue())
4566       return true;
4567 
4568     if (!QueryingValueSimplified.getValue())
4569       return false;
4570 
4571     Value &QueryingValueSimplifiedUnwrapped =
4572         *QueryingValueSimplified.getValue();
4573 
4574     if (AccumulatedSimplifiedValue.hasValue() &&
4575         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4576         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4577       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4578     if (AccumulatedSimplifiedValue.hasValue() &&
4579         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4580       return true;
4581 
4582     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4583                       << " is assumed to be "
4584                       << QueryingValueSimplifiedUnwrapped << "\n");
4585 
4586     AccumulatedSimplifiedValue = QueryingValueSimplified;
4587     return true;
4588   }
4589 
  /// Returns true if a candidate was found, false otherwise.
4591   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4592     if (!getAssociatedValue().getType()->isIntegerTy())
4593       return false;
4594 
4595     // This will also pass the call base context.
4596     const auto &AA =
4597         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4598 
4599     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4600 
4601     if (!COpt.hasValue()) {
4602       SimplifiedAssociatedValue = llvm::None;
4603       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4604       return true;
4605     }
4606     if (auto *C = COpt.getValue()) {
4607       SimplifiedAssociatedValue = C;
4608       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4609       return true;
4610     }
4611     return false;
4612   }
4613 
4614   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4615     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4616       return true;
4617     if (askSimplifiedValueFor<AAPotentialValues>(A))
4618       return true;
4619     return false;
4620   }
4621 
4622   /// See AbstractAttribute::manifest(...).
4623   ChangeStatus manifest(Attributor &A) override {
4624     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4625 
4626     if (SimplifiedAssociatedValue.hasValue() &&
4627         !SimplifiedAssociatedValue.getValue())
4628       return Changed;
4629 
4630     Value &V = getAssociatedValue();
4631     auto *C = SimplifiedAssociatedValue.hasValue()
4632                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4633                   : UndefValue::get(V.getType());
4634     if (C && C != &V) {
4635       Value *NewV = AA::getWithType(*C, *V.getType());
4636       // We can replace the AssociatedValue with the constant.
4637       if (!V.user_empty() && &V != C && NewV) {
4638         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4639                           << " :: " << *this << "\n");
4640         if (A.changeValueAfterManifest(V, *NewV))
4641           Changed = ChangeStatus::CHANGED;
4642       }
4643     }
4644 
4645     return Changed | AAValueSimplify::manifest(A);
4646   }
4647 
4648   /// See AbstractState::indicatePessimisticFixpoint(...).
4649   ChangeStatus indicatePessimisticFixpoint() override {
    // NOTE: Associated value will be returned in a pessimistic fixpoint and is
    // regarded as known. That's why `indicateOptimisticFixpoint` is called.
4652     SimplifiedAssociatedValue = &getAssociatedValue();
4653     indicateOptimisticFixpoint();
4654     return ChangeStatus::CHANGED;
4655   }
4656 
4657 protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under the current assumption. If in the
  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  // returns the original associated value.
4662   Optional<Value *> SimplifiedAssociatedValue;
4663 };
4664 
4665 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4666   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4667       : AAValueSimplifyImpl(IRP, A) {}
4668 
4669   void initialize(Attributor &A) override {
4670     AAValueSimplifyImpl::initialize(A);
4671     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4672       indicatePessimisticFixpoint();
4673     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4674                  Attribute::StructRet, Attribute::Nest},
4675                 /* IgnoreSubsumingPositions */ true))
4676       indicatePessimisticFixpoint();
4677 
    // FIXME: This is a hack to prevent us from propagating function pointers
    // in the new pass manager CGSCC pass as it creates call edges the
    // CallGraphUpdater cannot handle yet.
4681     Value &V = getAssociatedValue();
4682     if (V.getType()->isPointerTy() &&
4683         V.getType()->getPointerElementType()->isFunctionTy() &&
4684         !A.isModulePass())
4685       indicatePessimisticFixpoint();
4686   }
4687 
4688   /// See AbstractAttribute::updateImpl(...).
4689   ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly, otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
4692     Argument *Arg = getAssociatedArgument();
4693     if (Arg->hasByValAttr()) {
4694       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4695       //       there is no race by not copying a constant byval.
4696       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4697                                                        DepClassTy::REQUIRED);
4698       if (!MemAA.isAssumedReadOnly())
4699         return indicatePessimisticFixpoint();
4700     }
4701 
4702     auto Before = SimplifiedAssociatedValue;
4703 
4704     auto PredForCallSite = [&](AbstractCallSite ACS) {
4705       const IRPosition &ACSArgPos =
4706           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
      // Check if a corresponding argument was found or if it is not
      // associated (which can happen for callback calls).
4709       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4710         return false;
4711 
4712       // We can only propagate thread independent values through callbacks.
4713       // This is different to direct/indirect call sites because for them we
4714       // know the thread executing the caller and callee is the same. For
4715       // callbacks this is not guaranteed, thus a thread dependent value could
4716       // be different for the caller and callee, making it invalid to propagate.
4717       Value &ArgOp = ACSArgPos.getAssociatedValue();
4718       if (ACS.isCallbackCall())
4719         if (auto *C = dyn_cast<Constant>(&ArgOp))
4720           if (C->isThreadDependent())
4721             return false;
4722       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4723     };
4724 
    // Generate an answer specific to the call site context.
4726     bool Success;
4727     bool AllCallSitesKnown;
4728     if (hasCallBaseContext())
4729       Success = PredForCallSite(
4730           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
4731     else
4732       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
4733                                        AllCallSitesKnown);
4734 
4735     if (!Success)
4736       if (!askSimplifiedValueForOtherAAs(A))
4737         return indicatePessimisticFixpoint();
4738 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
4742   }
4743 
4744   /// See AbstractAttribute::trackStatistics()
4745   void trackStatistics() const override {
4746     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4747   }
4748 };
4749 
4750 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4751   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4752       : AAValueSimplifyImpl(IRP, A) {}
4753 
4754   /// See AbstractAttribute::updateImpl(...).
4755   ChangeStatus updateImpl(Attributor &A) override {
4756     auto Before = SimplifiedAssociatedValue;
4757 
4758     auto PredForReturned = [&](Value &V) {
4759       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4760     };
4761 
4762     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4763       if (!askSimplifiedValueForOtherAAs(A))
4764         return indicatePessimisticFixpoint();
4765 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
4769   }
4770 
4771   ChangeStatus manifest(Attributor &A) override {
4772     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4773 
4774     if (SimplifiedAssociatedValue.hasValue() &&
4775         !SimplifiedAssociatedValue.getValue())
4776       return Changed;
4777 
4778     Value &V = getAssociatedValue();
4779     auto *C = SimplifiedAssociatedValue.hasValue()
4780                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4781                   : UndefValue::get(V.getType());
4782     if (C && C != &V) {
4783       auto PredForReturned =
4784           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4785             // We can replace the AssociatedValue with the constant.
4786             if (&V == C || isa<UndefValue>(V))
4787               return true;
4788 
4789             for (ReturnInst *RI : RetInsts) {
4790               if (RI->getFunction() != getAnchorScope())
4791                 continue;
4792               Value *NewV =
4793                   AA::getWithType(*C, *RI->getReturnValue()->getType());
4794               if (!NewV)
4795                 continue;
4796               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *NewV
4797                                 << " in " << *RI << " :: " << *this << "\n");
4798               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
4799                 Changed = ChangeStatus::CHANGED;
4800             }
4801             return true;
4802           };
4803       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4804     }
4805 
4806     return Changed | AAValueSimplify::manifest(A);
4807   }
4808 
4809   /// See AbstractAttribute::trackStatistics()
4810   void trackStatistics() const override {
4811     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4812   }
4813 };
4814 
4815 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4816   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4817       : AAValueSimplifyImpl(IRP, A) {}
4818 
4819   /// See AbstractAttribute::initialize(...).
4820   void initialize(Attributor &A) override {
4821     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4822     //        Needs investigation.
4823     // AAValueSimplifyImpl::initialize(A);
4824     Value &V = getAnchorValue();
4825 
    // TODO: Handle other cases as well.
4827     if (isa<Constant>(V))
4828       indicatePessimisticFixpoint();
4829   }
4830 
4831   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4832   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4833   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4834   /// updated and \p Changed is set appropriately.
4835   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4836                               ChangeStatus &Changed) {
4837     if (!ICmp)
4838       return false;
4839     if (!ICmp->isEquality())
4840       return false;
4841 
    // This is an equality comparison (== or !=). We check for nullptr now.
4843     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4844     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4845     if (!Op0IsNull && !Op1IsNull)
4846       return false;
4847 
4848     LLVMContext &Ctx = ICmp->getContext();
4849     // Check for `nullptr ==/!= nullptr` first:
4850     if (Op0IsNull && Op1IsNull) {
4851       Value *NewVal = ConstantInt::get(
4852           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4853       assert(!SimplifiedAssociatedValue.hasValue() &&
4854              "Did not expect non-fixed value for constant comparison");
4855       SimplifiedAssociatedValue = NewVal;
4856       indicateOptimisticFixpoint();
4857       Changed = ChangeStatus::CHANGED;
4858       return true;
4859     }
4860 
    // What is left is the nullptr ==/!= non-nullptr case. We'll use AANonNull
    // on the non-nullptr operand, and if we can assume it is non-null we can
    // conclude the result of the comparison.
4864     assert((Op0IsNull || Op1IsNull) &&
4865            "Expected nullptr versus non-nullptr comparison at this point");
4866 
    // PtrIdx is the index of the operand we assume to be non-null: operand 1
    // if operand 0 is the nullptr, and operand 0 otherwise.
4868     unsigned PtrIdx = Op0IsNull;
4869     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4870         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4871         DepClassTy::REQUIRED);
4872     if (!PtrNonNullAA.isAssumedNonNull())
4873       return false;
4874 
4875     // The new value depends on the predicate, true for != and false for ==.
4876     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4877                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4878 
4879     assert((!SimplifiedAssociatedValue.hasValue() ||
4880             SimplifiedAssociatedValue == NewVal) &&
4881            "Did not expect to change value for zero-comparison");
4882 
4883     auto Before = SimplifiedAssociatedValue;
4884     SimplifiedAssociatedValue = NewVal;
4885 
4886     if (PtrNonNullAA.isKnownNonNull())
4887       indicateOptimisticFixpoint();
4888 
    Changed = Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                                  : ChangeStatus::CHANGED;
4891     return true;
4892   }
4893 
4894   /// See AbstractAttribute::updateImpl(...).
4895   ChangeStatus updateImpl(Attributor &A) override {
4896     auto Before = SimplifiedAssociatedValue;
4897 
4898     ChangeStatus Changed;
4899     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4900                                Changed))
4901       return Changed;
4902 
4903     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4904                             bool Stripped) -> bool {
4905       auto &AA = A.getAAFor<AAValueSimplify>(
4906           *this, IRPosition::value(V, getCallBaseContext()),
4907           DepClassTy::REQUIRED);
4908       if (!Stripped && this == &AA) {
        // TODO: Look at the instruction and check recursively.
4910 
4911         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4912                           << "\n");
4913         return false;
4914       }
4915       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4916     };
4917 
4918     bool Dummy = false;
4919     if (!genericValueTraversal<AAValueSimplify, bool>(
4920             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4921             /* UseValueSimplify */ false))
4922       if (!askSimplifiedValueForOtherAAs(A))
4923         return indicatePessimisticFixpoint();
4924 
    // If a candidate was found in this update, return CHANGED.
    return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
                                               : ChangeStatus::CHANGED;
4928   }
4929 
4930   /// See AbstractAttribute::trackStatistics()
4931   void trackStatistics() const override {
4932     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4933   }
4934 };
4935 
4936 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4937   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4938       : AAValueSimplifyImpl(IRP, A) {}
4939 
4940   /// See AbstractAttribute::initialize(...).
4941   void initialize(Attributor &A) override {
4942     SimplifiedAssociatedValue = &getAnchorValue();
4943     indicateOptimisticFixpoint();
4944   }
  /// See AbstractAttribute::updateImpl(...).
4946   ChangeStatus updateImpl(Attributor &A) override {
4947     llvm_unreachable(
4948         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4949   }
4950   /// See AbstractAttribute::trackStatistics()
4951   void trackStatistics() const override {
4952     STATS_DECLTRACK_FN_ATTR(value_simplify)
4953   }
4954 };
4955 
4956 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4957   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4958       : AAValueSimplifyFunction(IRP, A) {}
4959   /// See AbstractAttribute::trackStatistics()
4960   void trackStatistics() const override {
4961     STATS_DECLTRACK_CS_ATTR(value_simplify)
4962   }
4963 };
4964 
4965 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4966   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4967       : AAValueSimplifyReturned(IRP, A) {}
4968 
4969   /// See AbstractAttribute::manifest(...).
4970   ChangeStatus manifest(Attributor &A) override {
4971     return AAValueSimplifyImpl::manifest(A);
4972   }
4973 
4974   void trackStatistics() const override {
4975     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4976   }
4977 };
4978 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4979   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4980       : AAValueSimplifyFloating(IRP, A) {}
4981 
4982   /// See AbstractAttribute::manifest(...).
4983   ChangeStatus manifest(Attributor &A) override {
4984     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4985 
4986     if (SimplifiedAssociatedValue.hasValue() &&
4987         !SimplifiedAssociatedValue.getValue())
4988       return Changed;
4989 
4990     Value &V = getAssociatedValue();
4991     auto *C = SimplifiedAssociatedValue.hasValue()
4992                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4993                   : UndefValue::get(V.getType());
4994     if (C) {
4995       Use &U = cast<CallBase>(&getAnchorValue())
4996                    ->getArgOperandUse(getCallSiteArgNo());
4997       // We can replace the AssociatedValue with the constant.
4998       if (&V != C) {
4999         if (Value *NewV = AA::getWithType(*C, *V.getType()))
5000           if (A.changeUseAfterManifest(U, *NewV))
5001             Changed = ChangeStatus::CHANGED;
5002       }
5003     }
5004 
5005     return Changed | AAValueSimplify::manifest(A);
5006   }
5007 
5008   void trackStatistics() const override {
5009     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5010   }
5011 };
5012 
5013 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5014 struct AAHeapToStackImpl : public AAHeapToStack {
5015   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
5016       : AAHeapToStack(IRP, A) {}
5017 
5018   const std::string getAsStr() const override {
5019     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
5020   }
5021 
5022   ChangeStatus manifest(Attributor &A) override {
5023     assert(getState().isValidState() &&
5024            "Attempted to manifest an invalid state!");
5025 
5026     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5027     Function *F = getAnchorScope();
5028     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5029 
5030     for (Instruction *MallocCall : MallocCalls) {
5031       // This malloc cannot be replaced.
5032       if (BadMallocCalls.count(MallocCall))
5033         continue;
5034 
5035       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5036         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5037         A.deleteAfterManifest(*FreeCall);
5038         HasChanged = ChangeStatus::CHANGED;
5039       }
5040 
5041       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5042                         << "\n");
5043 
5044       Align Alignment;
5045       Value *Size;
5046       if (isCallocLikeFn(MallocCall, TLI)) {
5047         auto *Num = MallocCall->getOperand(0);
5048         auto *SizeT = MallocCall->getOperand(1);
5049         IRBuilder<> B(MallocCall);
5050         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5051       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5052         Size = MallocCall->getOperand(1);
5053         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5054                                    ->getValue()
5055                                    .getZExtValue())
5056                         .valueOrOne();
5057       } else {
5058         Size = MallocCall->getOperand(0);
5059       }
5060 
5061       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5062       Instruction *AI =
5063           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5064                          "", MallocCall->getNextNode());
5065 
5066       if (AI->getType() != MallocCall->getType())
5067         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5068                              AI->getNextNode());
5069 
5070       A.changeValueAfterManifest(*MallocCall, *AI);
5071 
5072       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5073         auto *NBB = II->getNormalDest();
5074         BranchInst::Create(NBB, MallocCall->getParent());
5075         A.deleteAfterManifest(*MallocCall);
5076       } else {
5077         A.deleteAfterManifest(*MallocCall);
5078       }
5079 
5080       // Zero out the allocated memory if it was a calloc.
5081       if (isCallocLikeFn(MallocCall, TLI)) {
5082         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5083                                    AI->getNextNode());
5084         Value *Ops[] = {
5085             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5086             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5087 
5088         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5089         Module *M = F->getParent();
5090         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5091         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5092       }
5093       HasChanged = ChangeStatus::CHANGED;
5094     }
5095 
5096     return HasChanged;
5097   }
5098 
5099   /// Collection of all malloc calls in a function.
5100   SmallSetVector<Instruction *, 4> MallocCalls;
5101 
5102   /// Collection of malloc calls that cannot be converted.
5103   DenseSet<const Instruction *> BadMallocCalls;
5104 
5105   /// A map for each malloc call to the set of associated free calls.
5106   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5107 
5108   ChangeStatus updateImpl(Attributor &A) override;
5109 };
5110 
5111 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5112   const Function *F = getAnchorScope();
5113   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5114 
5115   MustBeExecutedContextExplorer &Explorer =
5116       A.getInfoCache().getMustBeExecutedContextExplorer();
5117 
5118   auto FreeCheck = [&](Instruction &I) {
5119     const auto &Frees = FreesForMalloc.lookup(&I);
5120     if (Frees.size() != 1)
5121       return false;
5122     Instruction *UniqueFree = *Frees.begin();
5123     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5124   };
5125 
5126   auto UsesCheck = [&](Instruction &I) {
5127     bool ValidUsesOnly = true;
5128     bool MustUse = true;
5129     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5130       Instruction *UserI = cast<Instruction>(U.getUser());
5131       if (isa<LoadInst>(UserI))
5132         return true;
5133       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5134         if (SI->getValueOperand() == U.get()) {
5135           LLVM_DEBUG(dbgs()
5136                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5137           ValidUsesOnly = false;
5138         } else {
5139           // A store into the malloc'ed memory is fine.
5140         }
5141         return true;
5142       }
5143       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5144         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5145           return true;
        // Record free calls for this allocation.
5147         if (isFreeCall(UserI, TLI)) {
5148           if (MustUse) {
5149             FreesForMalloc[&I].insert(UserI);
5150           } else {
5151             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5152                               << *UserI << "\n");
5153             ValidUsesOnly = false;
5154           }
5155           return true;
5156         }
5157 
5158         unsigned ArgNo = CB->getArgOperandNo(&U);
5159 
5160         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5161             *this, IRPosition::callsite_argument(*CB, ArgNo),
5162             DepClassTy::REQUIRED);
5163 
5164         // If a callsite argument use is nofree, we are fine.
5165         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5166             *this, IRPosition::callsite_argument(*CB, ArgNo),
5167             DepClassTy::REQUIRED);
5168 
5169         if (!NoCaptureAA.isAssumedNoCapture() ||
5170             !ArgNoFreeAA.isAssumedNoFree()) {
5171           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5172           ValidUsesOnly = false;
5173         }
5174         return true;
5175       }
5176 
5177       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5178           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5179         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5180         Follow = true;
5181         return true;
5182       }
      // Unknown user for which we cannot track uses further (in a way that
      // makes sense).
5185       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5186       ValidUsesOnly = false;
5187       return true;
5188     };
5189     A.checkForAllUses(Pred, *this, I);
5190     return ValidUsesOnly;
5191   };
5192 
5193   auto MallocCallocCheck = [&](Instruction &I) {
5194     if (BadMallocCalls.count(&I))
5195       return true;
5196 
5197     bool IsMalloc = isMallocLikeFn(&I, TLI);
5198     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5199     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5200     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5201       BadMallocCalls.insert(&I);
5202       return true;
5203     }
5204 
5205     if (IsMalloc) {
5206       if (MaxHeapToStackSize == -1) {
5207         if (UsesCheck(I) || FreeCheck(I)) {
5208           MallocCalls.insert(&I);
5209           return true;
5210         }
5211       }
5212       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5213         if (Size->getValue().ule(MaxHeapToStackSize))
5214           if (UsesCheck(I) || FreeCheck(I)) {
5215             MallocCalls.insert(&I);
5216             return true;
5217           }
5218     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5219       if (MaxHeapToStackSize == -1) {
5220         if (UsesCheck(I) || FreeCheck(I)) {
5221           MallocCalls.insert(&I);
5222           return true;
5223         }
5224       }
5225       // Only if the alignment and sizes are constant.
5226       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5227         if (Size->getValue().ule(MaxHeapToStackSize))
5228           if (UsesCheck(I) || FreeCheck(I)) {
5229             MallocCalls.insert(&I);
5230             return true;
5231           }
5232     } else if (IsCalloc) {
5233       if (MaxHeapToStackSize == -1) {
5234         if (UsesCheck(I) || FreeCheck(I)) {
5235           MallocCalls.insert(&I);
5236           return true;
5237         }
5238       }
5239       bool Overflow = false;
5240       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5241         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5242           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5243                   .ule(MaxHeapToStackSize))
5244             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5245               MallocCalls.insert(&I);
5246               return true;
5247             }
5248     }
5249 
5250     BadMallocCalls.insert(&I);
5251     return true;
5252   };
5253 
5254   size_t NumBadMallocs = BadMallocCalls.size();
5255 
5256   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5257 
5258   if (NumBadMallocs != BadMallocCalls.size())
5259     return ChangeStatus::CHANGED;
5260 
5261   return ChangeStatus::UNCHANGED;
5262 }
5263 
5264 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5265   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5266       : AAHeapToStackImpl(IRP, A) {}
5267 
5268   /// See AbstractAttribute::trackStatistics().
5269   void trackStatistics() const override {
5270     STATS_DECL(
5271         MallocCalls, Function,
5272         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5273     for (auto *C : MallocCalls)
5274       if (!BadMallocCalls.count(C))
5275         ++BUILD_STAT_NAME(MallocCalls, Function);
5276   }
5277 };
5278 
5279 /// ----------------------- Privatizable Pointers ------------------------------
5280 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5281   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5282       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5283 
5284   ChangeStatus indicatePessimisticFixpoint() override {
5285     AAPrivatizablePtr::indicatePessimisticFixpoint();
5286     PrivatizableType = nullptr;
5287     return ChangeStatus::CHANGED;
5288   }
5289 
  /// Identify the type we can choose for a private copy of the underlying
  /// argument. None means it is not clear yet, nullptr means there is none.
5292   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5293 
5294   /// Return a privatizable type that encloses both T0 and T1.
5295   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5296   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5297     if (!T0.hasValue())
5298       return T1;
5299     if (!T1.hasValue())
5300       return T0;
5301     if (T0 == T1)
5302       return T0;
5303     return nullptr;
5304   }
5305 
5306   Optional<Type *> getPrivatizableType() const override {
5307     return PrivatizableType;
5308   }
5309 
5310   const std::string getAsStr() const override {
5311     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5312   }
5313 
5314 protected:
5315   Optional<Type *> PrivatizableType;
5316 };
5317 
5318 // TODO: Do this for call site arguments (probably also other values) as well.
5319 
5320 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5321   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5322       : AAPrivatizablePtrImpl(IRP, A) {}
5323 
5324   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5325   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5326     // If this is a byval argument and we know all the call sites (so we can
5327     // rewrite them), there is no need to check them explicitly.
5328     bool AllCallSitesKnown;
5329     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5330         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5331                                true, AllCallSitesKnown))
5332       return getAssociatedValue().getType()->getPointerElementType();
5333 
5334     Optional<Type *> Ty;
5335     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5336 
5337     // Make sure the associated call site argument has the same type at all call
5338     // sites and it is an allocation we know is safe to privatize, for now that
5339     // means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
5341     //       the type from that information instead. That is a little more
5342     //       involved and will be done in a follow up patch.
5343     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5344       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is not
      // associated (which can happen for callback calls).
5347       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5348         return false;
5349 
5350       // Check that all call sites agree on a type.
5351       auto &PrivCSArgAA =
5352           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5353       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5354 
5355       LLVM_DEBUG({
5356         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5357         if (CSTy.hasValue() && CSTy.getValue())
5358           CSTy.getValue()->print(dbgs());
5359         else if (CSTy.hasValue())
5360           dbgs() << "<nullptr>";
5361         else
5362           dbgs() << "<none>";
5363       });
5364 
5365       Ty = combineTypes(Ty, CSTy);
5366 
5367       LLVM_DEBUG({
5368         dbgs() << " : New Type: ";
5369         if (Ty.hasValue() && Ty.getValue())
5370           Ty.getValue()->print(dbgs());
5371         else if (Ty.hasValue())
5372           dbgs() << "<nullptr>";
5373         else
5374           dbgs() << "<none>";
5375         dbgs() << "\n";
5376       });
5377 
5378       return !Ty.hasValue() || Ty.getValue();
5379     };
5380 
5381     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5382       return nullptr;
5383     return Ty;
5384   }
5385 
5386   /// See AbstractAttribute::updateImpl(...).
5387   ChangeStatus updateImpl(Attributor &A) override {
5388     PrivatizableType = identifyPrivatizableType(A);
5389     if (!PrivatizableType.hasValue())
5390       return ChangeStatus::UNCHANGED;
5391     if (!PrivatizableType.getValue())
5392       return indicatePessimisticFixpoint();
5393 
    // The dependence is optional so we don't give up on this attribute once
    // we give up on the alignment.
5396     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5397                         DepClassTy::OPTIONAL);
5398 
5399     // Avoid arguments with padding for now.
5400     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5401         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5402                                                 A.getInfoCache().getDL())) {
5403       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5404       return indicatePessimisticFixpoint();
5405     }
5406 
5407     // Verify callee and caller agree on how the promoted argument would be
5408     // passed.
    // TODO: The use of the ArgumentPromotion interface here is ugly; we need
    // a specialized form of TargetTransformInfo::areFunctionArgsABICompatible
    // which doesn't require the arguments ArgumentPromotion wanted to pass.
5412     Function &Fn = *getIRPosition().getAnchorScope();
5413     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5414     ArgsToPromote.insert(getAssociatedArgument());
5415     const auto *TTI =
5416         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5417     if (!TTI ||
5418         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5419             Fn, *TTI, ArgsToPromote, Dummy) ||
5420         ArgsToPromote.empty()) {
5421       LLVM_DEBUG(
5422           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5423                  << Fn.getName() << "\n");
5424       return indicatePessimisticFixpoint();
5425     }
5426 
5427     // Collect the types that will replace the privatizable type in the function
5428     // signature.
5429     SmallVector<Type *, 16> ReplacementTypes;
5430     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5431 
5432     // Register a rewrite of the argument.
5433     Argument *Arg = getAssociatedArgument();
5434     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5435       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5436       return indicatePessimisticFixpoint();
5437     }
5438 
5439     unsigned ArgNo = Arg->getArgNo();
5440 
    // Helper to check whether, for the given call site, the associated
    // argument is passed to a callback in which the privatization would
    // differ.
5443     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5444       SmallVector<const Use *, 4> CallbackUses;
5445       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5446       for (const Use *U : CallbackUses) {
5447         AbstractCallSite CBACS(U);
5448         assert(CBACS && CBACS.isCallbackCall());
5449         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5450           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5451 
5452           LLVM_DEBUG({
5453             dbgs()
5454                 << "[AAPrivatizablePtr] Argument " << *Arg
                << " check if it can be privatized in the context of its parent ("
5456                 << Arg->getParent()->getName()
5457                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5458                    "callback ("
5459                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5460                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5461                 << CBACS.getCallArgOperand(CBArg) << " vs "
5462                 << CB.getArgOperand(ArgNo) << "\n"
5463                 << "[AAPrivatizablePtr] " << CBArg << " : "
5464                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5465           });
5466 
5467           if (CBArgNo != int(ArgNo))
5468             continue;
5469           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5470               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5471           if (CBArgPrivAA.isValidState()) {
5472             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5473             if (!CBArgPrivTy.hasValue())
5474               continue;
5475             if (CBArgPrivTy.getValue() == PrivatizableType)
5476               continue;
5477           }
5478 
5479           LLVM_DEBUG({
5480             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5481                    << " cannot be privatized in the context of its parent ("
5482                    << Arg->getParent()->getName()
5483                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5484                       "callback ("
5485                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                   << ")\n[AAPrivatizablePtr] for which the argument "
5487                       "privatization is not compatible.\n";
5488           });
5489           return false;
5490         }
5491       }
5492       return true;
5493     };
5494 
    // Helper to check whether, for the given call site, the associated
    // argument is passed to a direct call in which the privatization would
    // differ.
5497     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5498       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5499       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5500       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5501              "Expected a direct call operand for callback call operand");
5502 
5503       LLVM_DEBUG({
5504         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if it can be privatized in the context of its parent ("
5506                << Arg->getParent()->getName()
5507                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5508                   "direct call of ("
5509                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5510                << ").\n";
5511       });
5512 
5513       Function *DCCallee = DC->getCalledFunction();
5514       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5515         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5516             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5517             DepClassTy::REQUIRED);
5518         if (DCArgPrivAA.isValidState()) {
5519           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5520           if (!DCArgPrivTy.hasValue())
5521             return true;
5522           if (DCArgPrivTy.getValue() == PrivatizableType)
5523             return true;
5524         }
5525       }
5526 
5527       LLVM_DEBUG({
5528         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5529                << " cannot be privatized in the context of its parent ("
5530                << Arg->getParent()->getName()
5531                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5532                   "direct call of ("
5533                << ACS.getInstruction()->getCalledFunction()->getName()
               << ")\n[AAPrivatizablePtr] for which the argument "
5535                   "privatization is not compatible.\n";
5536       });
5537       return false;
5538     };
5539 
5540     // Helper to check if the associated argument is used at the given abstract
5541     // call site in a way that is incompatible with the privatization assumed
5542     // here.
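    // Note the cross-wiring below: for a direct call site we inspect the
    // callback uses of that call, and for a callback call site we inspect
    // the underlying direct call, since the privatization assumed at the
    // other "view" of the call has to agree with the one assumed here.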
5543     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5544       if (ACS.isDirectCall())
5545         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5546       if (ACS.isCallbackCall())
5547         return IsCompatiblePrivArgOfDirectCS(ACS);
5548       return false;
5549     };
5550 
5551     bool AllCallSitesKnown;
5552     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5553                                 AllCallSitesKnown))
5554       return indicatePessimisticFixpoint();
5555 
5556     return ChangeStatus::UNCHANGED;
5557   }
5558 
  /// Given a type to privatize, \p PrivType, collect the constituent types
  /// (which are used) in \p ReplacementTypes.
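  /// For example (illustrative): {i32, i8*} expands to the two types i32
  /// and i8*, [4 x float] expands to four float entries, and any other type
  /// is passed through unchanged.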
5561   static void
5562   identifyReplacementTypes(Type *PrivType,
5563                            SmallVectorImpl<Type *> &ReplacementTypes) {
5564     // TODO: For now we expand the privatization type to the fullest which can
5565     //       lead to dead arguments that need to be removed later.
5566     assert(PrivType && "Expected privatizable type!");
5567 
    // Traverse the type, extract constituent types on the outermost level.
5569     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5570       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5571         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5572     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5573       ReplacementTypes.append(PrivArrayType->getNumElements(),
5574                               PrivArrayType->getElementType());
5575     } else {
5576       ReplacementTypes.push_back(PrivType);
5577     }
5578   }
5579 
5580   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5581   /// The values needed are taken from the arguments of \p F starting at
5582   /// position \p ArgNo.
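  /// For example (illustrative): for \p PrivType == {i32, i64} and
  /// \p ArgNo == 2, the third and fourth arguments of \p F are stored into
  /// the two elements of \p Base through GEPs emitted at \p IP.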
5583   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5584                                    unsigned ArgNo, Instruction &IP) {
5585     assert(PrivType && "Expected privatizable type!");
5586 
5587     IRBuilder<NoFolder> IRB(&IP);
5588     const DataLayout &DL = F.getParent()->getDataLayout();
5589 
5590     // Traverse the type, build GEPs and stores.
5591     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5592       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5593       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5594         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5595         Value *Ptr =
5596             constructPointer(PointeeTy, PrivType, &Base,
5597                              PrivStructLayout->getElementOffset(u), IRB, DL);
5598         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5599       }
5600     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5601       Type *PointeeTy = PrivArrayType->getElementType();
5602       Type *PointeePtrTy = PointeeTy->getPointerTo();
5603       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5604       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5605         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
5606                                       u * PointeeTySize, IRB, DL);
5607         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5608       }
5609     } else {
5610       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5611     }
5612   }
5613 
5614   /// Extract values from \p Base according to the type \p PrivType at the
5615   /// call position \p ACS. The values are appended to \p ReplacementValues.
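  /// For example (illustrative): for \p PrivType == [2 x i32], two i32
  /// loads from \p Base (at byte offsets 0 and 4) are appended to
  /// \p ReplacementValues and later passed in place of the pointer argument.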
5616   void createReplacementValues(Align Alignment, Type *PrivType,
5617                                AbstractCallSite ACS, Value *Base,
5618                                SmallVectorImpl<Value *> &ReplacementValues) {
5619     assert(Base && "Expected base value!");
5620     assert(PrivType && "Expected privatizable type!");
5621     Instruction *IP = ACS.getInstruction();
5622 
5623     IRBuilder<NoFolder> IRB(IP);
5624     const DataLayout &DL = IP->getModule()->getDataLayout();
5625 
5626     if (Base->getType()->getPointerElementType() != PrivType)
5627       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5628                                                  "", ACS.getInstruction());
5629 
5630     // Traverse the type, build GEPs and loads.
5631     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5632       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5633       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5634         Type *PointeeTy = PrivStructType->getElementType(u);
5635         Value *Ptr =
5636             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
5637                              PrivStructLayout->getElementOffset(u), IRB, DL);
5638         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5639         L->setAlignment(Alignment);
5640         ReplacementValues.push_back(L);
5641       }
5642     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5643       Type *PointeeTy = PrivArrayType->getElementType();
5644       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5645       Type *PointeePtrTy = PointeeTy->getPointerTo();
5646       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5647         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
5648                                       u * PointeeTySize, IRB, DL);
5649         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5650         L->setAlignment(Alignment);
5651         ReplacementValues.push_back(L);
5652       }
5653     } else {
5654       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5655       L->setAlignment(Alignment);
5656       ReplacementValues.push_back(L);
5657     }
5658   }
5659 
5660   /// See AbstractAttribute::manifest(...)
5661   ChangeStatus manifest(Attributor &A) override {
5662     if (!PrivatizableType.hasValue())
5663       return ChangeStatus::UNCHANGED;
5664     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5665 
5666     // Collect all tail calls in the function as we cannot allow new allocas to
5667     // escape into tail recursion.
5668     // TODO: Be smarter about new allocas escaping into tail calls.
5669     SmallVector<CallInst *, 16> TailCalls;
5670     if (!A.checkForAllInstructions(
5671             [&](Instruction &I) {
5672               CallInst &CI = cast<CallInst>(I);
5673               if (CI.isTailCall())
5674                 TailCalls.push_back(&CI);
5675               return true;
5676             },
5677             *this, {Instruction::Call}))
5678       return ChangeStatus::UNCHANGED;
5679 
5680     Argument *Arg = getAssociatedArgument();
5681     // Query AAAlign attribute for alignment of associated argument to
5682     // determine the best alignment of loads.
5683     const auto &AlignAA =
5684         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5685 
5686     // Callback to repair the associated function. A new alloca is placed at the
5687     // beginning and initialized with the values passed through arguments. The
5688     // new alloca replaces the use of the old pointer argument.
5689     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5690         [=](const Attributor::ArgumentReplacementInfo &ARI,
5691             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5692           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5693           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5694           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5695                                            Arg->getName() + ".priv", IP);
5696           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5697                                ArgIt->getArgNo(), *IP);
5698 
5699           if (AI->getType() != Arg->getType())
5700             AI =
5701                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5702           Arg->replaceAllUsesWith(AI);
5703 
5704           for (CallInst *CI : TailCalls)
5705             CI->setTailCall(false);
5706         };
5707 
5708     // Callback to repair a call site of the associated function. The elements
5709     // of the privatizable type are loaded prior to the call and passed to the
5710     // new function version.
5711     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5712         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5713                       AbstractCallSite ACS,
5714                       SmallVectorImpl<Value *> &NewArgOperands) {
5715           // When no alignment is specified for the load instruction,
5716           // natural alignment is assumed.
5717           createReplacementValues(
5718               assumeAligned(AlignAA.getAssumedAlign()),
5719               PrivatizableType.getValue(), ACS,
5720               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5721               NewArgOperands);
5722         };
5723 
5724     // Collect the types that will replace the privatizable type in the function
5725     // signature.
5726     SmallVector<Type *, 16> ReplacementTypes;
5727     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5728 
5729     // Register a rewrite of the argument.
5730     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5731                                            std::move(FnRepairCB),
5732                                            std::move(ACSRepairCB)))
5733       return ChangeStatus::CHANGED;
5734     return ChangeStatus::UNCHANGED;
5735   }
5736 
5737   /// See AbstractAttribute::trackStatistics()
5738   void trackStatistics() const override {
5739     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5740   }
5741 };
5742 
5743 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5744   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5745       : AAPrivatizablePtrImpl(IRP, A) {}
5746 
5747   /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
5749     // TODO: We can privatize more than arguments.
5750     indicatePessimisticFixpoint();
5751   }
5752 
5753   ChangeStatus updateImpl(Attributor &A) override {
5754     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5755                      "updateImpl will not be called");
5756   }
5757 
5758   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5759   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5760     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5761     if (!Obj) {
5762       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5763       return nullptr;
5764     }
5765 
5766     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5767       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5768         if (CI->isOne())
5769           return Obj->getType()->getPointerElementType();
5770     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5771       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5772           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5773       if (PrivArgAA.isAssumedPrivatizablePtr())
5774         return Obj->getType()->getPointerElementType();
5775     }
5776 
5777     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5778                          "alloca nor privatizable argument: "
5779                       << *Obj << "!\n");
5780     return nullptr;
5781   }
5782 
5783   /// See AbstractAttribute::trackStatistics()
5784   void trackStatistics() const override {
5785     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5786   }
5787 };
5788 
5789 struct AAPrivatizablePtrCallSiteArgument final
5790     : public AAPrivatizablePtrFloating {
5791   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5792       : AAPrivatizablePtrFloating(IRP, A) {}
5793 
5794   /// See AbstractAttribute::initialize(...).
5795   void initialize(Attributor &A) override {
5796     if (getIRPosition().hasAttr(Attribute::ByVal))
5797       indicateOptimisticFixpoint();
5798   }
5799 
5800   /// See AbstractAttribute::updateImpl(...).
5801   ChangeStatus updateImpl(Attributor &A) override {
5802     PrivatizableType = identifyPrivatizableType(A);
5803     if (!PrivatizableType.hasValue())
5804       return ChangeStatus::UNCHANGED;
5805     if (!PrivatizableType.getValue())
5806       return indicatePessimisticFixpoint();
5807 
5808     const IRPosition &IRP = getIRPosition();
5809     auto &NoCaptureAA =
5810         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5811     if (!NoCaptureAA.isAssumedNoCapture()) {
5812       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5813       return indicatePessimisticFixpoint();
5814     }
5815 
5816     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5817     if (!NoAliasAA.isAssumedNoAlias()) {
5818       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5819       return indicatePessimisticFixpoint();
5820     }
5821 
5822     const auto &MemBehaviorAA =
5823         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5824     if (!MemBehaviorAA.isAssumedReadOnly()) {
5825       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5826       return indicatePessimisticFixpoint();
5827     }
5828 
5829     return ChangeStatus::UNCHANGED;
5830   }
5831 
5832   /// See AbstractAttribute::trackStatistics()
5833   void trackStatistics() const override {
5834     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5835   }
5836 };
5837 
5838 struct AAPrivatizablePtrCallSiteReturned final
5839     : public AAPrivatizablePtrFloating {
5840   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5841       : AAPrivatizablePtrFloating(IRP, A) {}
5842 
5843   /// See AbstractAttribute::initialize(...).
5844   void initialize(Attributor &A) override {
5845     // TODO: We can privatize more than arguments.
5846     indicatePessimisticFixpoint();
5847   }
5848 
5849   /// See AbstractAttribute::trackStatistics()
5850   void trackStatistics() const override {
5851     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5852   }
5853 };
5854 
5855 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5856   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5857       : AAPrivatizablePtrFloating(IRP, A) {}
5858 
5859   /// See AbstractAttribute::initialize(...).
5860   void initialize(Attributor &A) override {
5861     // TODO: We can privatize more than arguments.
5862     indicatePessimisticFixpoint();
5863   }
5864 
5865   /// See AbstractAttribute::trackStatistics()
5866   void trackStatistics() const override {
5867     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5868   }
5869 };
5870 
5871 /// -------------------- Memory Behavior Attributes ----------------------------
5872 /// Includes read-none, read-only, and write-only.
5873 /// ----------------------------------------------------------------------------
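///
/// The state is a bit-integer lattice over the two bits NO_READS and
/// NO_WRITES (see getKnownStateFromValue below):
///   readnone  == NO_READS | NO_WRITES (== NO_ACCESSES)
///   readonly  == NO_WRITES
///   writeonly == NO_READS
/// Bits are only ever removed as evidence of reads or writes is found.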
5874 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5875   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5876       : AAMemoryBehavior(IRP, A) {}
5877 
5878   /// See AbstractAttribute::initialize(...).
5879   void initialize(Attributor &A) override {
5880     intersectAssumedBits(BEST_STATE);
5881     getKnownStateFromValue(getIRPosition(), getState());
5882     AAMemoryBehavior::initialize(A);
5883   }
5884 
5885   /// Return the memory behavior information encoded in the IR for \p IRP.
5886   static void getKnownStateFromValue(const IRPosition &IRP,
5887                                      BitIntegerState &State,
5888                                      bool IgnoreSubsumingPositions = false) {
5889     SmallVector<Attribute, 2> Attrs;
5890     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5891     for (const Attribute &Attr : Attrs) {
5892       switch (Attr.getKindAsEnum()) {
5893       case Attribute::ReadNone:
5894         State.addKnownBits(NO_ACCESSES);
5895         break;
5896       case Attribute::ReadOnly:
5897         State.addKnownBits(NO_WRITES);
5898         break;
5899       case Attribute::WriteOnly:
5900         State.addKnownBits(NO_READS);
5901         break;
5902       default:
5903         llvm_unreachable("Unexpected attribute!");
5904       }
5905     }
5906 
5907     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5908       if (!I->mayReadFromMemory())
5909         State.addKnownBits(NO_READS);
5910       if (!I->mayWriteToMemory())
5911         State.addKnownBits(NO_WRITES);
5912     }
5913   }
5914 
5915   /// See AbstractAttribute::getDeducedAttributes(...).
5916   void getDeducedAttributes(LLVMContext &Ctx,
5917                             SmallVectorImpl<Attribute> &Attrs) const override {
5918     assert(Attrs.size() == 0);
5919     if (isAssumedReadNone())
5920       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5921     else if (isAssumedReadOnly())
5922       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5923     else if (isAssumedWriteOnly())
5924       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5925     assert(Attrs.size() <= 1);
5926   }
5927 
5928   /// See AbstractAttribute::manifest(...).
5929   ChangeStatus manifest(Attributor &A) override {
5930     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5931       return ChangeStatus::UNCHANGED;
5932 
5933     const IRPosition &IRP = getIRPosition();
5934 
5935     // Check if we would improve the existing attributes first.
5936     SmallVector<Attribute, 4> DeducedAttrs;
5937     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5938     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5939           return IRP.hasAttr(Attr.getKindAsEnum(),
5940                              /* IgnoreSubsumingPositions */ true);
5941         }))
5942       return ChangeStatus::UNCHANGED;
5943 
5944     // Clear existing attributes.
5945     IRP.removeAttrs(AttrKinds);
5946 
5947     // Use the generic manifest method.
5948     return IRAttribute::manifest(A);
5949   }
5950 
5951   /// See AbstractState::getAsStr().
5952   const std::string getAsStr() const override {
5953     if (isAssumedReadNone())
5954       return "readnone";
5955     if (isAssumedReadOnly())
5956       return "readonly";
5957     if (isAssumedWriteOnly())
5958       return "writeonly";
5959     return "may-read/write";
5960   }
5961 
5962   /// The set of IR attributes AAMemoryBehavior deals with.
5963   static const Attribute::AttrKind AttrKinds[3];
5964 };
5965 
5966 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5967     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5968 
5969 /// Memory behavior attribute for a floating value.
5970 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5971   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5972       : AAMemoryBehaviorImpl(IRP, A) {}
5973 
5974   /// See AbstractAttribute::initialize(...).
5975   void initialize(Attributor &A) override {
5976     AAMemoryBehaviorImpl::initialize(A);
5977     addUsesOf(A, getAssociatedValue());
5978   }
5979 
5980   /// See AbstractAttribute::updateImpl(...).
5981   ChangeStatus updateImpl(Attributor &A) override;
5982 
5983   /// See AbstractAttribute::trackStatistics()
5984   void trackStatistics() const override {
5985     if (isAssumedReadNone())
5986       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5987     else if (isAssumedReadOnly())
5988       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5989     else if (isAssumedWriteOnly())
5990       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5991   }
5992 
5993 private:
5994   /// Return true if users of \p UserI might access the underlying
5995   /// variable/location described by \p U and should therefore be analyzed.
5996   bool followUsersOfUseIn(Attributor &A, const Use *U,
5997                           const Instruction *UserI);
5998 
5999   /// Update the state according to the effect of use \p U in \p UserI.
6000   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
6001 
6002 protected:
6003   /// Add the uses of \p V to the `Uses` set we look at during the update step.
6004   void addUsesOf(Attributor &A, const Value &V);
6005 
6006   /// Container for (transitive) uses of the associated argument.
6007   SmallVector<const Use *, 8> Uses;
6008 
6009   /// Set to remember the uses we already traversed.
6010   SmallPtrSet<const Use *, 8> Visited;
6011 };
6012 
/// Memory behavior attribute for a function argument.
6014 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6015   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6016       : AAMemoryBehaviorFloating(IRP, A) {}
6017 
6018   /// See AbstractAttribute::initialize(...).
6019   void initialize(Attributor &A) override {
6020     intersectAssumedBits(BEST_STATE);
6021     const IRPosition &IRP = getIRPosition();
6022     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6023     // can query it when we use has/getAttr. That would allow us to reuse the
6024     // initialize of the base class here.
6025     bool HasByVal =
6026         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6027     getKnownStateFromValue(IRP, getState(),
6028                            /* IgnoreSubsumingPositions */ HasByVal);
6029 
6030     // Initialize the use vector with all direct uses of the associated value.
6031     Argument *Arg = getAssociatedArgument();
6032     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
6033       indicatePessimisticFixpoint();
6034     } else {
6035       addUsesOf(A, *Arg);
6036     }
6037   }
6038 
6039   ChangeStatus manifest(Attributor &A) override {
6040     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6041     if (!getAssociatedValue().getType()->isPointerTy())
6042       return ChangeStatus::UNCHANGED;
6043 
6044     // TODO: From readattrs.ll: "inalloca parameters are always
6045     //                           considered written"
6046     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6047       removeKnownBits(NO_WRITES);
6048       removeAssumedBits(NO_WRITES);
6049     }
6050     return AAMemoryBehaviorFloating::manifest(A);
6051   }
6052 
6053   /// See AbstractAttribute::trackStatistics()
6054   void trackStatistics() const override {
6055     if (isAssumedReadNone())
6056       STATS_DECLTRACK_ARG_ATTR(readnone)
6057     else if (isAssumedReadOnly())
6058       STATS_DECLTRACK_ARG_ATTR(readonly)
6059     else if (isAssumedWriteOnly())
6060       STATS_DECLTRACK_ARG_ATTR(writeonly)
6061   }
6062 };
6063 
6064 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6065   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6066       : AAMemoryBehaviorArgument(IRP, A) {}
6067 
6068   /// See AbstractAttribute::initialize(...).
6069   void initialize(Attributor &A) override {
    // If we don't have an associated argument this is either a variadic call
    // or an indirect call; either way, there is nothing to do here.
6072     Argument *Arg = getAssociatedArgument();
6073     if (!Arg) {
6074       indicatePessimisticFixpoint();
6075       return;
6076     }
6077     if (Arg->hasByValAttr()) {
6078       addKnownBits(NO_WRITES);
6079       removeKnownBits(NO_READS);
6080       removeAssumedBits(NO_READS);
6081     }
6082     AAMemoryBehaviorArgument::initialize(A);
6083     if (getAssociatedFunction()->isDeclaration())
6084       indicatePessimisticFixpoint();
6085   }
6086 
6087   /// See AbstractAttribute::updateImpl(...).
6088   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6093     Argument *Arg = getAssociatedArgument();
6094     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6095     auto &ArgAA =
6096         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6097     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6098   }
6099 
6100   /// See AbstractAttribute::trackStatistics()
6101   void trackStatistics() const override {
6102     if (isAssumedReadNone())
6103       STATS_DECLTRACK_CSARG_ATTR(readnone)
6104     else if (isAssumedReadOnly())
6105       STATS_DECLTRACK_CSARG_ATTR(readonly)
6106     else if (isAssumedWriteOnly())
6107       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6108   }
6109 };
6110 
6111 /// Memory behavior attribute for a call site return position.
6112 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6113   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6114       : AAMemoryBehaviorFloating(IRP, A) {}
6115 
6116   /// See AbstractAttribute::initialize(...).
6117   void initialize(Attributor &A) override {
6118     AAMemoryBehaviorImpl::initialize(A);
6119     Function *F = getAssociatedFunction();
6120     if (!F || F->isDeclaration())
6121       indicatePessimisticFixpoint();
6122   }
6123 
6124   /// See AbstractAttribute::manifest(...).
6125   ChangeStatus manifest(Attributor &A) override {
6126     // We do not annotate returned values.
6127     return ChangeStatus::UNCHANGED;
6128   }
6129 
6130   /// See AbstractAttribute::trackStatistics()
6131   void trackStatistics() const override {}
6132 };
6133 
6134 /// An AA to represent the memory behavior function attributes.
6135 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6136   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6137       : AAMemoryBehaviorImpl(IRP, A) {}
6138 
6139   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;
6141 
6142   /// See AbstractAttribute::manifest(...).
6143   ChangeStatus manifest(Attributor &A) override {
6144     Function &F = cast<Function>(getAnchorValue());
6145     if (isAssumedReadNone()) {
6146       F.removeFnAttr(Attribute::ArgMemOnly);
6147       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6148       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6149     }
6150     return AAMemoryBehaviorImpl::manifest(A);
6151   }
6152 
6153   /// See AbstractAttribute::trackStatistics()
6154   void trackStatistics() const override {
6155     if (isAssumedReadNone())
6156       STATS_DECLTRACK_FN_ATTR(readnone)
6157     else if (isAssumedReadOnly())
6158       STATS_DECLTRACK_FN_ATTR(readonly)
6159     else if (isAssumedWriteOnly())
6160       STATS_DECLTRACK_FN_ATTR(writeonly)
6161   }
6162 };
6163 
6164 /// AAMemoryBehavior attribute for call sites.
6165 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6166   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6167       : AAMemoryBehaviorImpl(IRP, A) {}
6168 
6169   /// See AbstractAttribute::initialize(...).
6170   void initialize(Attributor &A) override {
6171     AAMemoryBehaviorImpl::initialize(A);
6172     Function *F = getAssociatedFunction();
6173     if (!F || F->isDeclaration())
6174       indicatePessimisticFixpoint();
6175   }
6176 
6177   /// See AbstractAttribute::updateImpl(...).
6178   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6183     Function *F = getAssociatedFunction();
6184     const IRPosition &FnPos = IRPosition::function(*F);
6185     auto &FnAA =
6186         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6187     return clampStateAndIndicateChange(getState(), FnAA.getState());
6188   }
6189 
6190   /// See AbstractAttribute::trackStatistics()
6191   void trackStatistics() const override {
6192     if (isAssumedReadNone())
6193       STATS_DECLTRACK_CS_ATTR(readnone)
6194     else if (isAssumedReadOnly())
6195       STATS_DECLTRACK_CS_ATTR(readonly)
6196     else if (isAssumedWriteOnly())
6197       STATS_DECLTRACK_CS_ATTR(writeonly)
6198   }
6199 };
6200 
6201 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6202 
6203   // The current assumed state used to determine a change.
6204   auto AssumedState = getAssumed();
6205 
6206   auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has its own memory behavior state, use it to
    // restrict the local state. No further analysis is required as the other
    // memory state is as optimistic as it gets.
6210     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6211       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6212           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6213       intersectAssumedBits(MemBehaviorAA.getAssumed());
6214       return !isAtFixpoint();
6215     }
6216 
6217     // Remove access kind modifiers if necessary.
6218     if (I.mayReadFromMemory())
6219       removeAssumedBits(NO_READS);
6220     if (I.mayWriteToMemory())
6221       removeAssumedBits(NO_WRITES);
6222     return !isAtFixpoint();
6223   };
6224 
6225   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6226     return indicatePessimisticFixpoint();
6227 
6228   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6229                                         : ChangeStatus::UNCHANGED;
6230 }
6231 
6232 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6233 
6234   const IRPosition &IRP = getIRPosition();
6235   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6236   AAMemoryBehavior::StateType &S = getState();
6237 
  // First, check the function scope. We take the known information and we
  // avoid work if the assumed information implies the current assumed
  // information for this attribute. This is valid for all but byval
  // arguments.
6241   Argument *Arg = IRP.getAssociatedArgument();
6242   AAMemoryBehavior::base_t FnMemAssumedState =
6243       AAMemoryBehavior::StateType::getWorstState();
6244   if (!Arg || !Arg->hasByValAttr()) {
6245     const auto &FnMemAA =
6246         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6247     FnMemAssumedState = FnMemAA.getAssumed();
6248     S.addKnownBits(FnMemAA.getKnown());
6249     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6250       return ChangeStatus::UNCHANGED;
6251   }
6252 
  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check
  // the potential aliases introduced by the capture. However, there is no
  // need to fall back to anything less optimistic than the function state.
6257   const auto &ArgNoCaptureAA =
6258       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6259   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6260     S.intersectAssumedBits(FnMemAssumedState);
6261     return ChangeStatus::CHANGED;
6262   }
6263 
6264   // The current assumed state used to determine a change.
6265   auto AssumedState = S.getAssumed();
6266 
6267   // Liveness information to exclude dead users.
6268   // TODO: Take the FnPos once we have call site specific liveness information.
6269   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6270       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6271       DepClassTy::NONE);
6272 
6273   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6274   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6275     const Use *U = Uses[i];
6276     Instruction *UserI = cast<Instruction>(U->getUser());
6277     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6278                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6279                       << "]\n");
6280     if (A.isAssumedDead(*U, this, &LivenessAA))
6281       continue;
6282 
    // Droppable users, e.g., llvm::assume, do not actually perform any
    // action.
6284     if (UserI->isDroppable())
6285       continue;
6286 
6287     // Check if the users of UserI should also be visited.
6288     if (followUsersOfUseIn(A, U, UserI))
6289       addUsesOf(A, *UserI);
6290 
6291     // If UserI might touch memory we analyze the use in detail.
6292     if (UserI->mayReadOrWriteMemory())
6293       analyzeUseIn(A, U, UserI);
6294   }
6295 
6296   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6297                                         : ChangeStatus::UNCHANGED;
6298 }
6299 
6300 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6301   SmallVector<const Use *, 8> WL;
6302   for (const Use &U : V.uses())
6303     WL.push_back(&U);
6304 
6305   while (!WL.empty()) {
6306     const Use *U = WL.pop_back_val();
6307     if (!Visited.insert(U).second)
6308       continue;
6309 
6310     const Instruction *UserI = cast<Instruction>(U->getUser());
6311     if (UserI->mayReadOrWriteMemory()) {
6312       Uses.push_back(U);
6313       continue;
6314     }
6315     if (!followUsersOfUseIn(A, U, UserI))
6316       continue;
6317     for (const Use &UU : UserI->uses())
6318       WL.push_back(&UU);
6319   }
6320 }
6321 
6322 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6323                                                   const Instruction *UserI) {
  // The loaded value is unrelated to the pointer argument; no need to
  // follow the users of the load.
6326   if (isa<LoadInst>(UserI))
6327     return false;
6328 
  // By default we follow all uses assuming UserI might leak information on
  // U; we have special handling for call site operands though.
6331   const auto *CB = dyn_cast<CallBase>(UserI);
6332   if (!CB || !CB->isArgOperand(U))
6333     return true;
6334 
  // If the use is a call argument known not to be captured, the users of
  // the call do not need to be visited because they have to be unrelated to
  // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
6341   if (U->get()->getType()->isPointerTy()) {
6342     unsigned ArgNo = CB->getArgOperandNo(U);
6343     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6344         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6345     return !ArgNoCaptureAA.isAssumedNoCapture();
6346   }
6347 
6348   return true;
6349 }
6350 
6351 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6352                                             const Instruction *UserI) {
6353   assert(UserI->mayReadOrWriteMemory());
6354 
6355   switch (UserI->getOpcode()) {
6356   default:
6357     // TODO: Handle all atomics and other side-effect operations we know of.
6358     break;
6359   case Instruction::Load:
6360     // Loads cause the NO_READS property to disappear.
6361     removeAssumedBits(NO_READS);
6362     return;
6363 
6364   case Instruction::Store:
6365     // Stores cause the NO_WRITES property to disappear if the use is the
6366     // pointer operand. Note that we do assume that capturing was taken care of
6367     // somewhere else.
6368     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6369       removeAssumedBits(NO_WRITES);
6370     return;
6371 
6372   case Instruction::Call:
6373   case Instruction::CallBr:
6374   case Instruction::Invoke: {
6375     // For call sites we look at the argument memory behavior attribute (this
6376     // could be recursive!) in order to restrict our own state.
6377     const auto *CB = cast<CallBase>(UserI);
6378 
6379     // Give up on operand bundles.
6380     if (CB->isBundleOperand(U)) {
6381       indicatePessimisticFixpoint();
6382       return;
6383     }
6384 
    // Calling a function does read the function pointer; it may even write
    // it if the function is self-modifying.
6387     if (CB->isCallee(U)) {
6388       removeAssumedBits(NO_READS);
6389       break;
6390     }
6391 
6392     // Adjust the possible access behavior based on the information on the
6393     // argument.
6394     IRPosition Pos;
6395     if (U->get()->getType()->isPointerTy())
6396       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6397     else
6398       Pos = IRPosition::callsite_function(*CB);
6399     const auto &MemBehaviorAA =
6400         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6401     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6402     // and at least "known".
6403     intersectAssumedBits(MemBehaviorAA.getAssumed());
6404     return;
6405   }
  }
6407 
6408   // Generally, look at the "may-properties" and adjust the assumed state if we
6409   // did not trigger special handling before.
6410   if (UserI->mayReadFromMemory())
6411     removeAssumedBits(NO_READS);
6412   if (UserI->mayWriteToMemory())
6413     removeAssumedBits(NO_WRITES);
6414 }
6415 
6416 } // namespace
6417 
6418 /// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly, and
/// inaccessiblemem_or_argmemonly.
6421 /// ----------------------------------------------------------------------------
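///
/// The state tracks one NO_* bit per memory location kind (local/stack,
/// constant, internal and external globals, argument, inaccessible, malloced,
/// unknown). A set bit means the location is known or assumed not to be
/// accessed; e.g., argmemonly roughly corresponds to all bits set except the
/// argument (and local/constant) memory ones, see the inverseLocation uses
/// below.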
6422 
6423 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6424     AAMemoryLocation::MemoryLocationsKind MLK) {
6425   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6426     return "all memory";
6427   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6428     return "no memory";
6429   std::string S = "memory:";
6430   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6431     S += "stack,";
6432   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6433     S += "constant,";
6434   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6435     S += "internal global,";
6436   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6437     S += "external global,";
6438   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6439     S += "argument,";
6440   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6441     S += "inaccessible,";
6442   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6443     S += "malloced,";
6444   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6445     S += "unknown,";
6446   S.pop_back();
6447   return S;
6448 }
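// For example (illustrative), a function that only reads an internal global
// and writes one of its pointer arguments is summarized by
// getMemoryLocationsAsStr as "memory:internal global,argument".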
6449 
6450 namespace {
6451 struct AAMemoryLocationImpl : public AAMemoryLocation {
6452 
6453   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6454       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6455     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6456       AccessKind2Accesses[u] = nullptr;
6457   }
6458 
6459   ~AAMemoryLocationImpl() {
    // The AccessSets are allocated via a BumpPtrAllocator; we call the
    // destructors manually.
6462     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6463       if (AccessKind2Accesses[u])
6464         AccessKind2Accesses[u]->~AccessSet();
6465   }
6466 
6467   /// See AbstractAttribute::initialize(...).
6468   void initialize(Attributor &A) override {
6469     intersectAssumedBits(BEST_STATE);
6470     getKnownStateFromValue(A, getIRPosition(), getState());
6471     AAMemoryLocation::initialize(A);
6472   }
6473 
6474   /// Return the memory behavior information encoded in the IR for \p IRP.
6475   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6476                                      BitIntegerState &State,
6477                                      bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblemem_or_argmemonly` as we might break them via
    // interprocedural constant propagation. It is unclear if this is the best
    // way, but it is unlikely this will cause real performance problems. If
    // we are deriving attributes for the anchor function we even remove the
    // attribute in addition to ignoring it.
6484     bool UseArgMemOnly = true;
6485     Function *AnchorFn = IRP.getAnchorScope();
6486     if (AnchorFn && A.isRunOn(*AnchorFn))
6487       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6488 
6489     SmallVector<Attribute, 2> Attrs;
6490     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6491     for (const Attribute &Attr : Attrs) {
6492       switch (Attr.getKindAsEnum()) {
6493       case Attribute::ReadNone:
6494         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6495         break;
6496       case Attribute::InaccessibleMemOnly:
6497         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6498         break;
6499       case Attribute::ArgMemOnly:
6500         if (UseArgMemOnly)
6501           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6502         else
6503           IRP.removeAttrs({Attribute::ArgMemOnly});
6504         break;
6505       case Attribute::InaccessibleMemOrArgMemOnly:
6506         if (UseArgMemOnly)
6507           State.addKnownBits(inverseLocation(
6508               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6509         else
6510           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6511         break;
6512       default:
6513         llvm_unreachable("Unexpected attribute!");
6514       }
6515     }
6516   }
6517 
6518   /// See AbstractAttribute::getDeducedAttributes(...).
6519   void getDeducedAttributes(LLVMContext &Ctx,
6520                             SmallVectorImpl<Attribute> &Attrs) const override {
6521     assert(Attrs.size() == 0);
6522     if (isAssumedReadNone()) {
6523       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6524     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6525       if (isAssumedInaccessibleMemOnly())
6526         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6527       else if (isAssumedArgMemOnly())
6528         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6529       else if (isAssumedInaccessibleOrArgMemOnly())
6530         Attrs.push_back(
6531             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6532     }
6533     assert(Attrs.size() <= 1);
6534   }
6535 
6536   /// See AbstractAttribute::manifest(...).
6537   ChangeStatus manifest(Attributor &A) override {
6538     const IRPosition &IRP = getIRPosition();
6539 
6540     // Check if we would improve the existing attributes first.
6541     SmallVector<Attribute, 4> DeducedAttrs;
6542     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6543     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6544           return IRP.hasAttr(Attr.getKindAsEnum(),
6545                              /* IgnoreSubsumingPositions */ true);
6546         }))
6547       return ChangeStatus::UNCHANGED;
6548 
6549     // Clear existing attributes.
6550     IRP.removeAttrs(AttrKinds);
6551     if (isAssumedReadNone())
6552       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6553 
6554     // Use the generic manifest method.
6555     return IRAttribute::manifest(A);
6556   }
6557 
6558   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6559   bool checkForAllAccessesToMemoryKind(
6560       function_ref<bool(const Instruction *, const Value *, AccessKind,
6561                         MemoryLocationsKind)>
6562           Pred,
6563       MemoryLocationsKind RequestedMLK) const override {
6564     if (!isValidState())
6565       return false;
6566 
6567     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6568     if (AssumedMLK == NO_LOCATIONS)
6569       return true;
6570 
6571     unsigned Idx = 0;
6572     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6573          CurMLK *= 2, ++Idx) {
6574       if (CurMLK & RequestedMLK)
6575         continue;
6576 
6577       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6578         for (const AccessInfo &AI : *Accesses)
6579           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6580             return false;
6581     }
6582 
6583     return true;
6584   }
6585 
6586   ChangeStatus indicatePessimisticFixpoint() override {
    // If we give up and indicate a pessimistic fixpoint this instruction
    // will become an access for all potential access kinds.
6589     // TODO: Add pointers for argmemonly and globals to improve the results of
6590     //       checkForAllAccessesToMemoryKind.
6591     bool Changed = false;
6592     MemoryLocationsKind KnownMLK = getKnown();
6593     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6594     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6595       if (!(CurMLK & KnownMLK))
6596         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6597                                   getAccessKindFromInst(I));
6598     return AAMemoryLocation::indicatePessimisticFixpoint();
6599   }
6600 
6601 protected:
6602   /// Helper struct to tie together an instruction that has a read or write
6603   /// effect with the pointer it accesses (if any).
6604   struct AccessInfo {
6605 
6606     /// The instruction that caused the access.
6607     const Instruction *I;
6608 
6609     /// The base pointer that is accessed, or null if unknown.
6610     const Value *Ptr;
6611 
6612     /// The kind of access (read/write/read+write).
6613     AccessKind Kind;
6614 
6615     bool operator==(const AccessInfo &RHS) const {
6616       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6617     }
6618     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6619       if (LHS.I != RHS.I)
6620         return LHS.I < RHS.I;
6621       if (LHS.Ptr != RHS.Ptr)
6622         return LHS.Ptr < RHS.Ptr;
6623       if (LHS.Kind != RHS.Kind)
6624         return LHS.Kind < RHS.Kind;
6625       return false;
6626     }
6627   };
6628 
  /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM (with the
  /// value NO_LOCAL_MEM), to the accesses encountered for that memory kind.
6631   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6632   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6633 
  /// Categorize the pointer arguments of \p CB that might access memory,
  /// updating \p AccessedLocs and the access map accordingly.
6636   void
6637   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6638                                      AAMemoryLocation::StateType &AccessedLocs,
6639                                      bool &Changed);
6640 
6641   /// Return the kind(s) of location that may be accessed by \p V.
6642   AAMemoryLocation::MemoryLocationsKind
6643   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6644 
6645   /// Return the access kind as determined by \p I.
6646   AccessKind getAccessKindFromInst(const Instruction *I) {
6647     AccessKind AK = READ_WRITE;
6648     if (I) {
6649       AK = I->mayReadFromMemory() ? READ : NONE;
6650       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6651     }
6652     return AK;
6653   }
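  // For example, a LoadInst yields READ, a (non-volatile) StoreInst yields
  // WRITE, an AtomicRMWInst yields READ_WRITE, and a null instruction
  // conservatively yields READ_WRITE.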
6654 
6655   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6656   /// an access of kind \p AK to a \p MLK memory location with the access
6657   /// pointer \p Ptr.
6658   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6659                                  MemoryLocationsKind MLK, const Instruction *I,
6660                                  const Value *Ptr, bool &Changed,
6661                                  AccessKind AK = READ_WRITE) {
6662 
6663     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6664     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6665     if (!Accesses)
6666       Accesses = new (Allocator) AccessSet();
6667     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6668     State.removeAssumedBits(MLK);
6669   }
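  // For example (illustrative), an access categorized as
  // NO_GLOBAL_INTERNAL_MEM, a single-bit kind, lands in slot
  // Log2_32(NO_GLOBAL_INTERNAL_MEM) of AccessKind2Accesses, and
  // removeAssumedBits clears that location from the assumed state:
  //
  //   updateStateAndAccessesMap(S, NO_GLOBAL_INTERNAL_MEM, &I, Ptr, Changed);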
6670 
6671   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6672   /// arguments, and update the state and access map accordingly.
6673   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6674                           AAMemoryLocation::StateType &State, bool &Changed);
6675 
6676   /// Used to allocate access sets.
6677   BumpPtrAllocator &Allocator;
6678 
6679   /// The set of IR attributes AAMemoryLocation deals with.
6680   static const Attribute::AttrKind AttrKinds[4];
6681 };
6682 
6683 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6684     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6685     Attribute::InaccessibleMemOrArgMemOnly};
6686 
6687 void AAMemoryLocationImpl::categorizePtrValue(
6688     Attributor &A, const Instruction &I, const Value &Ptr,
6689     AAMemoryLocation::StateType &State, bool &Changed) {
6690   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6691                     << Ptr << " ["
6692                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6693 
6694   auto StripGEPCB = [](Value *V) -> Value * {
6695     auto *GEP = dyn_cast<GEPOperator>(V);
6696     while (GEP) {
6697       V = GEP->getPointerOperand();
6698       GEP = dyn_cast<GEPOperator>(V);
6699     }
6700     return V;
6701   };
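  // E.g., for a chain like gep(gep(%base, 1), 2) the callback above returns
  // %base, so the visitor below only ever sees the underlying pointer and the
  // GEP-stripping assertion in it holds.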
6702 
6703   auto VisitValueCB = [&](Value &V, const Instruction *,
6704                           AAMemoryLocation::StateType &T,
6705                           bool Stripped) -> bool {
6706     // TODO: recognize the TBAA used for constant accesses.
6707     MemoryLocationsKind MLK = NO_LOCATIONS;
6708     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6709     if (isa<UndefValue>(V))
6710       return true;
6711     if (auto *Arg = dyn_cast<Argument>(&V)) {
6712       if (Arg->hasByValAttr())
6713         MLK = NO_LOCAL_MEM;
6714       else
6715         MLK = NO_ARGUMENT_MEM;
6716     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6717       // Reading constant memory is not treated as a read "effect" by the
6718       // function attr pass, so we do not treat it as one either. Constants
6719       // identified via TBAA are similar. (It is constant, so never written.)
6720       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6721         if (GVar->isConstant())
6722           return true;
6723 
6724       if (GV->hasLocalLinkage())
6725         MLK = NO_GLOBAL_INTERNAL_MEM;
6726       else
6727         MLK = NO_GLOBAL_EXTERNAL_MEM;
6728     } else if (isa<ConstantPointerNull>(V) &&
6729                !NullPointerIsDefined(getAssociatedFunction(),
6730                                      V.getType()->getPointerAddressSpace())) {
6731       return true;
6732     } else if (isa<AllocaInst>(V)) {
6733       MLK = NO_LOCAL_MEM;
6734     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6735       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6736           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6737       if (NoAliasAA.isAssumedNoAlias())
6738         MLK = NO_MALLOCED_MEM;
6739       else
6740         MLK = NO_UNKOWN_MEM;
6741     } else {
6742       MLK = NO_UNKOWN_MEM;
6743     }
6744 
6745     assert(MLK != NO_LOCATIONS && "No location specified!");
6746     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6747                               getAccessKindFromInst(&I));
6748     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value categorized: "
6749                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6750                       << "\n");
6751     return true;
6752   };
6753 
6754   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6755           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6756           /* UseValueSimplify */ true,
6757           /* MaxValues */ 32, StripGEPCB)) {
6758     LLVM_DEBUG(
6759         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6760     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6761                               getAccessKindFromInst(&I));
6762   } else {
6763     LLVM_DEBUG(
6764         dbgs()
6765         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6766         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6767   }
6768 }
6769 
6770 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6771     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6772     bool &Changed) {
6773   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6774 
6775     // Skip non-pointer arguments.
6776     const Value *ArgOp = CB.getArgOperand(ArgNo);
6777     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6778       continue;
6779 
6780     // Skip readnone arguments.
6781     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6782     const auto &ArgOpMemLocationAA =
6783         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6784 
6785     if (ArgOpMemLocationAA.isAssumedReadNone())
6786       continue;
6787 
6788     // Categorize potentially accessed pointer arguments as if there was an
6789     // access instruction with them as pointer.
6790     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6791   }
6792 }
6793 
6794 AAMemoryLocation::MemoryLocationsKind
6795 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6796                                                   bool &Changed) {
6797   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6798                     << I << "\n");
6799 
6800   AAMemoryLocation::StateType AccessedLocs;
6801   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6802 
6803   if (auto *CB = dyn_cast<CallBase>(&I)) {
6804 
6805     // First check if we assume any accessed memory is visible.
6806     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6807         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6808     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6809                       << " [" << CBMemLocationAA << "]\n");
6810 
6811     if (CBMemLocationAA.isAssumedReadNone())
6812       return NO_LOCATIONS;
6813 
6814     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6815       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6816                                 Changed, getAccessKindFromInst(&I));
6817       return AccessedLocs.getAssumed();
6818     }
6819 
6820     uint32_t CBAssumedNotAccessedLocs =
6821         CBMemLocationAA.getAssumedNotAccessedLocation();
6822 
6823     // Set the argmemonly and global bits, as we handle them separately below.
6824     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6825         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6826 
6827     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6828       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6829         continue;
6830       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6831                                 getAccessKindFromInst(&I));
6832     }
6833 
6834     // Now handle global memory if it might be accessed. This is slightly tricky
6835     // as NO_GLOBAL_MEM has multiple bits set.
6836     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6837     if (HasGlobalAccesses) {
6838       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6839                             AccessKind Kind, MemoryLocationsKind MLK) {
6840         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6841                                   getAccessKindFromInst(&I));
6842         return true;
6843       };
6844       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6845               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6846         return AccessedLocs.getWorstState();
6847     }
6848 
6849     LLVM_DEBUG(
6850         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6851                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6852 
6853     // Now handle argument memory if it might be accessed.
6854     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6855     if (HasArgAccesses)
6856       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6857 
6858     LLVM_DEBUG(
6859         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6860                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6861 
6862     return AccessedLocs.getAssumed();
6863   }
6864 
6865   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6866     LLVM_DEBUG(
6867         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6868                << I << " [" << *Ptr << "]\n");
6869     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6870     return AccessedLocs.getAssumed();
6871   }
6872 
6873   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6874                     << I << "\n");
6875   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6876                             getAccessKindFromInst(&I));
6877   return AccessedLocs.getAssumed();
6878 }
6879 
6880 /// An AA to represent the memory behavior function attributes.
6881 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6882   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6883       : AAMemoryLocationImpl(IRP, A) {}
6884 
6885   /// See AbstractAttribute::updateImpl(Attributor &A).
6886   ChangeStatus updateImpl(Attributor &A) override {
6887 
6888     const auto &MemBehaviorAA =
6889         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6890     if (MemBehaviorAA.isAssumedReadNone()) {
6891       if (MemBehaviorAA.isKnownReadNone())
6892         return indicateOptimisticFixpoint();
6893       assert(isAssumedReadNone() &&
6894              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6895       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6896       return ChangeStatus::UNCHANGED;
6897     }
6898 
6899     // The current assumed state used to determine a change.
6900     auto AssumedState = getAssumed();
6901     bool Changed = false;
6902 
6903     auto CheckRWInst = [&](Instruction &I) {
6904       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6905       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6906                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6907       removeAssumedBits(inverseLocation(MLK, false, false));
6908       // Stop once only the valid bit is set in the *not assumed location*,
6909       // that is, once we no longer exclude any memory locations in the state.
6910       return getAssumedNotAccessedLocation() != VALID_STATE;
6911     };
6912 
6913     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6914       return indicatePessimisticFixpoint();
6915 
6916     Changed |= AssumedState != getAssumed();
6917     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6918   }
6919 
6920   /// See AbstractAttribute::trackStatistics()
6921   void trackStatistics() const override {
6922     if (isAssumedReadNone())
6923       STATS_DECLTRACK_FN_ATTR(readnone)
6924     else if (isAssumedArgMemOnly())
6925       STATS_DECLTRACK_FN_ATTR(argmemonly)
6926     else if (isAssumedInaccessibleMemOnly())
6927       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6928     else if (isAssumedInaccessibleOrArgMemOnly())
6929       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6930   }
6931 };
6932 
6933 /// AAMemoryLocation attribute for call sites.
6934 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6935   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6936       : AAMemoryLocationImpl(IRP, A) {}
6937 
6938   /// See AbstractAttribute::initialize(...).
6939   void initialize(Attributor &A) override {
6940     AAMemoryLocationImpl::initialize(A);
6941     Function *F = getAssociatedFunction();
6942     if (!F || F->isDeclaration())
6943       indicatePessimisticFixpoint();
6944   }
6945 
6946   /// See AbstractAttribute::updateImpl(...).
6947   ChangeStatus updateImpl(Attributor &A) override {
6948     // TODO: Once we have call site specific value information we can provide
6949     //       call site specific liveness information and then it makes
6950     //       sense to specialize attributes for call sites arguments instead of
6951     //       redirecting requests to the callee argument.
6952     Function *F = getAssociatedFunction();
6953     const IRPosition &FnPos = IRPosition::function(*F);
6954     auto &FnAA =
6955         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
6956     bool Changed = false;
6957     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6958                           AccessKind Kind, MemoryLocationsKind MLK) {
6959       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6960                                 getAccessKindFromInst(I));
6961       return true;
6962     };
6963     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6964       return indicatePessimisticFixpoint();
6965     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6966   }
6967 
6968   /// See AbstractAttribute::trackStatistics()
6969   void trackStatistics() const override {
6970     if (isAssumedReadNone())
6971       STATS_DECLTRACK_CS_ATTR(readnone)
6972   }
6973 };
6974 
6975 /// ------------------ Value Constant Range Attribute -------------------------
6976 
6977 struct AAValueConstantRangeImpl : AAValueConstantRange {
6978   using StateType = IntegerRangeState;
6979   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6980       : AAValueConstantRange(IRP, A) {}
6981 
6982   /// See AbstractAttribute::getAsStr().
6983   const std::string getAsStr() const override {
6984     std::string Str;
6985     llvm::raw_string_ostream OS(Str);
6986     OS << "range(" << getBitWidth() << ")<";
6987     getKnown().print(OS);
6988     OS << " / ";
6989     getAssumed().print(OS);
6990     OS << ">";
6991     return OS.str();
6992   }
6993 
6994   /// Helper function to get a SCEV expr for the associated value at program
6995   /// point \p I.
6996   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6997     if (!getAnchorScope())
6998       return nullptr;
6999 
7000     ScalarEvolution *SE =
7001         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7002             *getAnchorScope());
7003 
7004     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7005         *getAnchorScope());
7006 
7007     if (!SE || !LI)
7008       return nullptr;
7009 
7010     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7011     if (!I)
7012       return S;
7013 
7014     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7015   }
7016 
7017   /// Helper function to get a range from SCEV for the associated value at
7018   /// program point \p I.
7019   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7020                                          const Instruction *I = nullptr) const {
7021     if (!getAnchorScope())
7022       return getWorstState(getBitWidth());
7023 
7024     ScalarEvolution *SE =
7025         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7026             *getAnchorScope());
7027 
7028     const SCEV *S = getSCEV(A, I);
7029     if (!SE || !S)
7030       return getWorstState(getBitWidth());
7031 
7032     return SE->getUnsignedRange(S);
7033   }
7034 
7035   /// Helper function to get a range from LVI for the associated value at
7036   /// program point \p CtxI.
7037   ConstantRange
7038   getConstantRangeFromLVI(Attributor &A,
7039                           const Instruction *CtxI = nullptr) const {
7040     if (!getAnchorScope())
7041       return getWorstState(getBitWidth());
7042 
7043     LazyValueInfo *LVI =
7044         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7045             *getAnchorScope());
7046 
7047     if (!LVI || !CtxI)
7048       return getWorstState(getBitWidth());
7049     return LVI->getConstantRange(&getAssociatedValue(),
7050                                  const_cast<Instruction *>(CtxI));
7051   }
7052 
7053   /// See AAValueConstantRange::getKnownConstantRange(..).
7054   ConstantRange
7055   getKnownConstantRange(Attributor &A,
7056                         const Instruction *CtxI = nullptr) const override {
7057     if (!CtxI || CtxI == getCtxI())
7058       return getKnown();
7059 
7060     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7061     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7062     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7063   }
7064 
7065   /// See AAValueConstantRange::getAssumedConstantRange(..).
7066   ConstantRange
7067   getAssumedConstantRange(Attributor &A,
7068                           const Instruction *CtxI = nullptr) const override {
7069     // TODO: Make SCEV use Attributor assumption.
7070     //       We may be able to bound a variable range via assumptions in
7071     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
7072     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7073 
7074     if (!CtxI || CtxI == getCtxI())
7075       return getAssumed();
7076 
7077     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7078     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7079     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7080   }
7081 
7082   /// See AbstractAttribute::initialize(..).
7083   void initialize(Attributor &A) override {
7084     // Intersect a range given by SCEV.
7085     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7086 
7087     // Intersect a range given by LVI.
7088     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7089   }
7090 
7091   /// Helper function to create MDNode for range metadata.
7092   static MDNode *
7093   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7094                             const ConstantRange &AssumedConstantRange) {
7095     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7096                                   Ty, AssumedConstantRange.getLower())),
7097                               ConstantAsMetadata::get(ConstantInt::get(
7098                                   Ty, AssumedConstantRange.getUpper()))};
7099     return MDNode::get(Ctx, LowAndHigh);
7100   }
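  // For example (illustrative IR), manifesting the range [0, 10) on an i32
  // call result produces:
  //
  //   %x = call i32 @foo(), !range !0
  //   !0 = !{i32 0, i32 10}
  //
  // Note that range metadata encodes a half-open interval [Lower, Upper).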
7101 
7102   /// Return true if \p Assumed is included in \p KnownRanges.
7103   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7104 
7105     if (Assumed.isFullSet())
7106       return false;
7107 
7108     if (!KnownRanges)
7109       return true;
7110 
7111     // If multiple ranges are annotated in the IR, we give up annotating the
7112     // assumed range for now.
7113 
7114     // TODO: If there exists a known range which contains the assumed range,
7115     // we can say the assumed range is better.
7116     if (KnownRanges->getNumOperands() > 2)
7117       return false;
7118 
7119     ConstantInt *Lower =
7120         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7121     ConstantInt *Upper =
7122         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7123 
7124     ConstantRange Known(Lower->getValue(), Upper->getValue());
7125     return Known.contains(Assumed) && Known != Assumed;
7126   }
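  // E.g., an assumed range [2, 5) is "better" than known metadata
  // !{i32 0, i32 10} because [0, 10) strictly contains [2, 5), whereas an
  // equal or full-set assumed range is not.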
7127 
7128   /// Helper function to set range metadata.
7129   static bool
7130   setRangeMetadataIfisBetterRange(Instruction *I,
7131                                   const ConstantRange &AssumedConstantRange) {
7132     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7133     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7134       if (!AssumedConstantRange.isEmptySet()) {
7135         I->setMetadata(LLVMContext::MD_range,
7136                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7137                                                  AssumedConstantRange));
7138         return true;
7139       }
7140     }
7141     return false;
7142   }
7143 
7144   /// See AbstractAttribute::manifest()
7145   ChangeStatus manifest(Attributor &A) override {
7146     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7147     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7148     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7149 
7150     auto &V = getAssociatedValue();
7151     if (!AssumedConstantRange.isEmptySet() &&
7152         !AssumedConstantRange.isSingleElement()) {
7153       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7154         assert(I == getCtxI() && "Should not annotate an instruction which is "
7155                                  "not the context instruction");
7156         if (isa<CallInst>(I) || isa<LoadInst>(I))
7157           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7158             Changed = ChangeStatus::CHANGED;
7159       }
7160     }
7161 
7162     return Changed;
7163   }
7164 };
7165 
7166 struct AAValueConstantRangeArgument final
7167     : AAArgumentFromCallSiteArguments<
7168           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7169           true /* BridgeCallBaseContext */> {
7170   using Base = AAArgumentFromCallSiteArguments<
7171       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7172       true /* BridgeCallBaseContext */>;
7173   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7174       : Base(IRP, A) {}
7175 
7176   /// See AbstractAttribute::initialize(..).
7177   void initialize(Attributor &A) override {
7178     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7179       indicatePessimisticFixpoint();
7180     } else {
7181       Base::initialize(A);
7182     }
7183   }
7184 
7185   /// See AbstractAttribute::trackStatistics()
7186   void trackStatistics() const override {
7187     STATS_DECLTRACK_ARG_ATTR(value_range)
7188   }
7189 };
7190 
7191 struct AAValueConstantRangeReturned
7192     : AAReturnedFromReturnedValues<AAValueConstantRange,
7193                                    AAValueConstantRangeImpl,
7194                                    AAValueConstantRangeImpl::StateType,
7195                                    /* PropagateCallBaseContext */ true> {
7196   using Base =
7197       AAReturnedFromReturnedValues<AAValueConstantRange,
7198                                    AAValueConstantRangeImpl,
7199                                    AAValueConstantRangeImpl::StateType,
7200                                    /* PropagateCallBaseContext */ true>;
7201   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7202       : Base(IRP, A) {}
7203 
7204   /// See AbstractAttribute::initialize(...).
7205   void initialize(Attributor &A) override {}
7206 
7207   /// See AbstractAttribute::trackStatistics()
7208   void trackStatistics() const override {
7209     STATS_DECLTRACK_FNRET_ATTR(value_range)
7210   }
7211 };
7212 
7213 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7214   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7215       : AAValueConstantRangeImpl(IRP, A) {}
7216 
7217   /// See AbstractAttribute::initialize(...).
7218   void initialize(Attributor &A) override {
7219     AAValueConstantRangeImpl::initialize(A);
7220     Value &V = getAssociatedValue();
7221 
7222     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7223       unionAssumed(ConstantRange(C->getValue()));
7224       indicateOptimisticFixpoint();
7225       return;
7226     }
7227 
7228     if (isa<UndefValue>(&V)) {
7229       // Collapse the undef state to 0.
7230       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7231       indicateOptimisticFixpoint();
7232       return;
7233     }
7234 
7235     if (isa<CallBase>(&V))
7236       return;
7237 
7238     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7239       return;
7240     // If it is a load instruction with range metadata, use it.
7241     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7242       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7243         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7244         return;
7245       }
7246 
7247     // We can work with PHI and select instructions as we traverse their
7248     // operands during the update.
7249     if (isa<SelectInst>(V) || isa<PHINode>(V))
7250       return;
7251 
7252     // Otherwise we give up.
7253     indicatePessimisticFixpoint();
7254 
7255     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7256                       << getAssociatedValue() << "\n");
7257   }
7258 
7259   bool calculateBinaryOperator(
7260       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7261       const Instruction *CtxI,
7262       SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
7263     Value *LHS = BinOp->getOperand(0);
7264     Value *RHS = BinOp->getOperand(1);
7265     // TODO: Allow non integers as well.
7266     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7267       return false;
7268 
7269     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7270         *this, IRPosition::value(*LHS, getCallBaseContext()),
7271         DepClassTy::REQUIRED);
7272     QueriedAAs.push_back(&LHSAA);
7273     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7274 
7275     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7276         *this, IRPosition::value(*RHS, getCallBaseContext()),
7277         DepClassTy::REQUIRED);
7278     QueriedAAs.push_back(&RHSAA);
7279     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7280 
7281     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7282 
7283     T.unionAssumed(AssumedRange);
7284 
7285     // TODO: Track a known state too.
7286 
7287     return T.isValidState();
7288   }
7289 
7290   bool calculateCastInst(
7291       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7292       const Instruction *CtxI,
7293       SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
7294     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7295     // TODO: Allow non integers as well.
7296     Value &OpV = *CastI->getOperand(0);
7297     if (!OpV.getType()->isIntegerTy())
7298       return false;
7299 
7300     auto &OpAA = A.getAAFor<AAValueConstantRange>(
7301         *this, IRPosition::value(OpV, getCallBaseContext()),
7302         DepClassTy::REQUIRED);
7303     QueriedAAs.push_back(&OpAA);
7304     T.unionAssumed(
7305         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7306     return T.isValidState();
7307   }
7308 
7309   bool
7310   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7311                    const Instruction *CtxI,
7312                    SmallVectorImpl<const AAValueConstantRange *> &QueriedAAs) {
7313     Value *LHS = CmpI->getOperand(0);
7314     Value *RHS = CmpI->getOperand(1);
7315     // TODO: Allow non integers as well.
7316     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7317       return false;
7318 
7319     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7320         *this, IRPosition::value(*LHS, getCallBaseContext()),
7321         DepClassTy::REQUIRED);
7322     QueriedAAs.push_back(&LHSAA);
7323     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7324         *this, IRPosition::value(*RHS, getCallBaseContext()),
7325         DepClassTy::REQUIRED);
    // Record RHSAA as well so the circular-reasoning check below sees it.
    QueriedAAs.push_back(&RHSAA);
7326     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7327     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7328 
7329     // If one of them is an empty set, we cannot decide.
7330     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7331       return true;
7332 
7333     bool MustTrue = false, MustFalse = false;
7334 
7335     auto AllowedRegion =
7336         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7337 
7338     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7339       MustFalse = true;
7340 
7341     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
7342       MustTrue = true;
7343 
7344     assert((!MustTrue || !MustFalse) &&
7345            "MustTrue and MustFalse cannot both be set!");
7346 
7347     if (MustTrue)
7348       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7349     else if (MustFalse)
7350       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7351     else
7352       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7353 
7354     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7355                       << " " << RHSAA << "\n");
7356 
7357     // TODO: Track a known state too.
7358     return T.isValidState();
7359   }
7360 
7361   /// See AbstractAttribute::updateImpl(...).
7362   ChangeStatus updateImpl(Attributor &A) override {
7363     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7364                             IntegerRangeState &T, bool Stripped) -> bool {
7365       Instruction *I = dyn_cast<Instruction>(&V);
7366       if (!I || isa<CallBase>(I)) {
7367 
7368         // If the value is not an instruction, we query the Attributor for its AA.
7369         const auto &AA = A.getAAFor<AAValueConstantRange>(
7370             *this, IRPosition::value(V, getCallBaseContext()),
7371             DepClassTy::REQUIRED);
7372 
7373         // We do not clamp here so that we can make use of the program point CtxI.
7374         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7375 
7376         return T.isValidState();
7377       }
7378 
7379       SmallVector<const AAValueConstantRange *, 4> QueriedAAs;
7380       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7381         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QueriedAAs))
7382           return false;
7383       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7384         if (!calculateCmpInst(A, CmpI, T, CtxI, QueriedAAs))
7385           return false;
7386       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7387         if (!calculateCastInst(A, CastI, T, CtxI, QueriedAAs))
7388           return false;
7389       } else {
7390         // Give up with other instructions.
7391         // TODO: Add other instructions
7392 
7393         T.indicatePessimisticFixpoint();
7394         return false;
7395       }
7396 
7397       // Catch circular reasoning in a pessimistic way for now.
7398       // TODO: Check how the range evolves and if we stripped anything, see also
7399       //       AADereferenceable or AAAlign for similar situations.
7400       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7401         if (QueriedAA != this)
7402           continue;
7403         // If we are in a steady state we do not need to worry.
7404         if (T.getAssumed() == getState().getAssumed())
7405           continue;
7406         T.indicatePessimisticFixpoint();
7407       }
7408 
7409       return T.isValidState();
7410     };
7411 
7412     IntegerRangeState T(getBitWidth());
7413 
7414     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7415             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7416             /* UseValueSimplify */ false))
7417       return indicatePessimisticFixpoint();
7418 
7419     return clampStateAndIndicateChange(getState(), T);
7420   }
7421 
7422   /// See AbstractAttribute::trackStatistics()
7423   void trackStatistics() const override {
7424     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7425   }
7426 };
7427 
7428 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7429   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7430       : AAValueConstantRangeImpl(IRP, A) {}
7431 
7432   /// See AbstractAttribute::updateImpl(...).
7433   ChangeStatus updateImpl(Attributor &A) override {
7434     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7435                      "not be called");
7436   }
7437 
7438   /// See AbstractAttribute::trackStatistics()
7439   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7440 };
7441 
7442 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7443   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7444       : AAValueConstantRangeFunction(IRP, A) {}
7445 
7446   /// See AbstractAttribute::trackStatistics()
7447   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7448 };
7449 
7450 struct AAValueConstantRangeCallSiteReturned
7451     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7452                                      AAValueConstantRangeImpl,
7453                                      AAValueConstantRangeImpl::StateType,
7454                                      /* IntroduceCallBaseContext */ true> {
7455   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7456       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7457                                        AAValueConstantRangeImpl,
7458                                        AAValueConstantRangeImpl::StateType,
7459                                        /* IntroduceCallBaseContext */ true>(IRP,
7460                                                                             A) {
7461   }
7462 
7463   /// See AbstractAttribute::initialize(...).
7464   void initialize(Attributor &A) override {
7465     // If it is a call instruction with range metadata, use the metadata.
7466     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7467       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7468         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7469 
7470     AAValueConstantRangeImpl::initialize(A);
7471   }
7472 
7473   /// See AbstractAttribute::trackStatistics()
7474   void trackStatistics() const override {
7475     STATS_DECLTRACK_CSRET_ATTR(value_range)
7476   }
7477 };
7478 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7479   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7480       : AAValueConstantRangeFloating(IRP, A) {}
7481 
7482   /// See AbstractAttribute::manifest()
7483   ChangeStatus manifest(Attributor &A) override {
7484     return ChangeStatus::UNCHANGED;
7485   }
7486 
7487   /// See AbstractAttribute::trackStatistics()
7488   void trackStatistics() const override {
7489     STATS_DECLTRACK_CSARG_ATTR(value_range)
7490   }
7491 };
7492 
7493 /// ------------------ Potential Values Attribute -------------------------
7494 
7495 struct AAPotentialValuesImpl : AAPotentialValues {
7496   using StateType = PotentialConstantIntValuesState;
7497 
7498   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7499       : AAPotentialValues(IRP, A) {}
7500 
7501   /// See AbstractAttribute::getAsStr().
7502   const std::string getAsStr() const override {
7503     std::string Str;
7504     llvm::raw_string_ostream OS(Str);
7505     OS << getState();
7506     return OS.str();
7507   }
7508 
7509   /// See AbstractAttribute::updateImpl(...).
7510   ChangeStatus updateImpl(Attributor &A) override {
7511     return indicatePessimisticFixpoint();
7512   }
7513 };
7514 
7515 struct AAPotentialValuesArgument final
7516     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7517                                       PotentialConstantIntValuesState> {
7518   using Base =
7519       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7520                                       PotentialConstantIntValuesState>;
7521   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7522       : Base(IRP, A) {}
7523 
7524   /// See AbstractAttribute::initialize(..).
7525   void initialize(Attributor &A) override {
7526     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7527       indicatePessimisticFixpoint();
7528     } else {
7529       Base::initialize(A);
7530     }
7531   }
7532 
7533   /// See AbstractAttribute::trackStatistics()
7534   void trackStatistics() const override {
7535     STATS_DECLTRACK_ARG_ATTR(potential_values)
7536   }
7537 };
7538 
7539 struct AAPotentialValuesReturned
7540     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7541   using Base =
7542       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7543   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7544       : Base(IRP, A) {}
7545 
7546   /// See AbstractAttribute::trackStatistics()
7547   void trackStatistics() const override {
7548     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7549   }
7550 };
7551 
7552 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7553   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7554       : AAPotentialValuesImpl(IRP, A) {}
7555 
7556   /// See AbstractAttribute::initialize(..).
7557   void initialize(Attributor &A) override {
7558     Value &V = getAssociatedValue();
7559 
7560     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7561       unionAssumed(C->getValue());
7562       indicateOptimisticFixpoint();
7563       return;
7564     }
7565 
7566     if (isa<UndefValue>(&V)) {
7567       unionAssumedWithUndef();
7568       indicateOptimisticFixpoint();
7569       return;
7570     }
7571 
7572     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7573       return;
7574 
7575     if (isa<SelectInst>(V) || isa<PHINode>(V))
7576       return;
7577 
7578     indicatePessimisticFixpoint();
7579 
7580     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7581                       << getAssociatedValue() << "\n");
7582   }
7583 
7584   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7585                                 const APInt &RHS) {
7586     ICmpInst::Predicate Pred = ICI->getPredicate();
7587     switch (Pred) {
7588     case ICmpInst::ICMP_UGT:
7589       return LHS.ugt(RHS);
7590     case ICmpInst::ICMP_SGT:
7591       return LHS.sgt(RHS);
7592     case ICmpInst::ICMP_EQ:
7593       return LHS.eq(RHS);
7594     case ICmpInst::ICMP_UGE:
7595       return LHS.uge(RHS);
7596     case ICmpInst::ICMP_SGE:
7597       return LHS.sge(RHS);
7598     case ICmpInst::ICMP_ULT:
7599       return LHS.ult(RHS);
7600     case ICmpInst::ICMP_SLT:
7601       return LHS.slt(RHS);
7602     case ICmpInst::ICMP_NE:
7603       return LHS.ne(RHS);
7604     case ICmpInst::ICMP_ULE:
7605       return LHS.ule(RHS);
7606     case ICmpInst::ICMP_SLE:
7607       return LHS.sle(RHS);
7608     default:
7609       llvm_unreachable("Invalid ICmp predicate!");
7610     }
7611   }
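  // Illustrative example: for `icmp ult i32 %a, %b` with LHS == 3 and
  // RHS == 7, this returns APInt(32, 3).ult(APInt(32, 7)), i.e., true.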
7612 
7613   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7614                                  uint32_t ResultBitWidth) {
7615     Instruction::CastOps CastOp = CI->getOpcode();
7616     switch (CastOp) {
7617     default:
7618       llvm_unreachable("unsupported or not integer cast");
7619     case Instruction::Trunc:
7620       return Src.trunc(ResultBitWidth);
7621     case Instruction::SExt:
7622       return Src.sext(ResultBitWidth);
7623     case Instruction::ZExt:
7624       return Src.zext(ResultBitWidth);
7625     case Instruction::BitCast:
7626       return Src;
7627     }
7628   }
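  // Illustrative examples: trunc i32 258 to i8 yields 2 (258 mod 256),
  // sext i8 -1 to i32 yields -1, and zext i8 255 to i32 yields 255.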
7629 
7630   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7631                                        const APInt &LHS, const APInt &RHS,
7632                                        bool &SkipOperation, bool &Unsupported) {
7633     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7634     // Unsupported is set to true when the binary operator is not supported.
7635     // SkipOperation is set to true when UB would occur with the given
7636     // operand pair (LHS, RHS).
7637     // TODO: we should look at nsw and nuw keywords to handle operations
7638     //       that create poison or undef value.
7639     switch (BinOpcode) {
7640     default:
7641       Unsupported = true;
7642       return LHS;
7643     case Instruction::Add:
7644       return LHS + RHS;
7645     case Instruction::Sub:
7646       return LHS - RHS;
7647     case Instruction::Mul:
7648       return LHS * RHS;
7649     case Instruction::UDiv:
7650       if (RHS.isNullValue()) {
7651         SkipOperation = true;
7652         return LHS;
7653       }
7654       return LHS.udiv(RHS);
7655     case Instruction::SDiv:
7656       if (RHS.isNullValue()) {
7657         SkipOperation = true;
7658         return LHS;
7659       }
7660       return LHS.sdiv(RHS);
7661     case Instruction::URem:
7662       if (RHS.isNullValue()) {
7663         SkipOperation = true;
7664         return LHS;
7665       }
7666       return LHS.urem(RHS);
7667     case Instruction::SRem:
7668       if (RHS.isNullValue()) {
7669         SkipOperation = true;
7670         return LHS;
7671       }
7672       return LHS.srem(RHS);
7673     case Instruction::Shl:
7674       return LHS.shl(RHS);
7675     case Instruction::LShr:
7676       return LHS.lshr(RHS);
7677     case Instruction::AShr:
7678       return LHS.ashr(RHS);
7679     case Instruction::And:
7680       return LHS & RHS;
7681     case Instruction::Or:
7682       return LHS | RHS;
7683     case Instruction::Xor:
7684       return LHS ^ RHS;
7685     }
7686   }
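  // E.g., evaluating `udiv i32 %l, %r` with RHS == 0 sets SkipOperation so
  // the (LHS, 0) pair is ignored instead of modeling a division by zero; any
  // opcode not listed above falls into the default case and sets Unsupported.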
7687 
7688   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7689                                            const APInt &LHS, const APInt &RHS) {
7690     bool SkipOperation = false;
7691     bool Unsupported = false;
7692     APInt Result =
7693         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7694     if (Unsupported)
7695       return false;
7696     // If SkipOperation is true, we can ignore this operand pair (L, R).
7697     if (!SkipOperation)
7698       unionAssumed(Result);
7699     return isValidState();
7700   }
7701 
7702   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7703     auto AssumedBefore = getAssumed();
7704     Value *LHS = ICI->getOperand(0);
7705     Value *RHS = ICI->getOperand(1);
7706     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7707       return indicatePessimisticFixpoint();
7708 
7709     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7710                                                 DepClassTy::REQUIRED);
7711     if (!LHSAA.isValidState())
7712       return indicatePessimisticFixpoint();
7713 
7714     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7715                                                 DepClassTy::REQUIRED);
7716     if (!RHSAA.isValidState())
7717       return indicatePessimisticFixpoint();
7718 
7719     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7720     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7721 
7722     // TODO: make use of undef flag to limit potential values aggressively.
7723     bool MaybeTrue = false, MaybeFalse = false;
7724     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7725     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7726       // The result of any comparison between undefs can be soundly replaced
7727       // with undef.
7728       unionAssumedWithUndef();
7729     } else if (LHSAA.undefIsContained()) {
7730       for (const APInt &R : RHSAAPVS) {
7731         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7732         MaybeTrue |= CmpResult;
7733         MaybeFalse |= !CmpResult;
7734         if (MaybeTrue & MaybeFalse)
7735           return indicatePessimisticFixpoint();
7736       }
7737     } else if (RHSAA.undefIsContained()) {
7738       for (const APInt &L : LHSAAPVS) {
7739         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7740         MaybeTrue |= CmpResult;
7741         MaybeFalse |= !CmpResult;
7742         if (MaybeTrue & MaybeFalse)
7743           return indicatePessimisticFixpoint();
7744       }
7745     } else {
7746       for (const APInt &L : LHSAAPVS) {
7747         for (const APInt &R : RHSAAPVS) {
7748           bool CmpResult = calculateICmpInst(ICI, L, R);
7749           MaybeTrue |= CmpResult;
7750           MaybeFalse |= !CmpResult;
7751           if (MaybeTrue & MaybeFalse)
7752             return indicatePessimisticFixpoint();
7753         }
7754       }
7755     }
7756     if (MaybeTrue)
7757       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7758     if (MaybeFalse)
7759       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7760     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7761                                          : ChangeStatus::CHANGED;
7762   }
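  // Worked example (illustrative): if %l has potential values {0, 1} and %r
  // has {1}, then `icmp eq i32 %l, %r` may evaluate to both true (1 == 1) and
  // false (0 != 1), so we reach the pessimistic fixpoint for the i1 result;
  // with {1} vs. {1} only MaybeTrue is set and the result set becomes {1}.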
7763 
7764   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7765     auto AssumedBefore = getAssumed();
7766     Value *LHS = SI->getTrueValue();
7767     Value *RHS = SI->getFalseValue();
7768     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7769       return indicatePessimisticFixpoint();
7770 
7771     // TODO: Use assumed simplified condition value
7772     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7773                                                 DepClassTy::REQUIRED);
7774     if (!LHSAA.isValidState())
7775       return indicatePessimisticFixpoint();
7776 
7777     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7778                                                 DepClassTy::REQUIRED);
7779     if (!RHSAA.isValidState())
7780       return indicatePessimisticFixpoint();
7781 
7782     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7783       // select i1 *, undef, undef => undef
7784       unionAssumedWithUndef();
7785     else {
7786       unionAssumed(LHSAA);
7787       unionAssumed(RHSAA);
7788     }
7789     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7790                                          : ChangeStatus::CHANGED;
7791   }
7792 
7793   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7794     auto AssumedBefore = getAssumed();
7795     if (!CI->isIntegerCast())
7796       return indicatePessimisticFixpoint();
7797     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7798     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7799     Value *Src = CI->getOperand(0);
7800     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7801                                                 DepClassTy::REQUIRED);
7802     if (!SrcAA.isValidState())
7803       return indicatePessimisticFixpoint();
7804     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7805     if (SrcAA.undefIsContained())
7806       unionAssumedWithUndef();
7807     else {
7808       for (const APInt &S : SrcAAPVS) {
7809         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7810         unionAssumed(T);
7811       }
7812     }
7813     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7814                                          : ChangeStatus::CHANGED;
7815   }
7816 
7817   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7818     auto AssumedBefore = getAssumed();
7819     Value *LHS = BinOp->getOperand(0);
7820     Value *RHS = BinOp->getOperand(1);
7821     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7822       return indicatePessimisticFixpoint();
7823 
7824     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7825                                                 DepClassTy::REQUIRED);
7826     if (!LHSAA.isValidState())
7827       return indicatePessimisticFixpoint();
7828 
7829     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7830                                                 DepClassTy::REQUIRED);
7831     if (!RHSAA.isValidState())
7832       return indicatePessimisticFixpoint();
7833 
7834     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7835     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7836     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7837 
7838     // TODO: make use of undef flag to limit potential values aggressively.
7839     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7840       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7841         return indicatePessimisticFixpoint();
7842     } else if (LHSAA.undefIsContained()) {
7843       for (const APInt &R : RHSAAPVS) {
7844         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7845           return indicatePessimisticFixpoint();
7846       }
7847     } else if (RHSAA.undefIsContained()) {
7848       for (const APInt &L : LHSAAPVS) {
7849         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7850           return indicatePessimisticFixpoint();
7851       }
7852     } else {
7853       for (const APInt &L : LHSAAPVS) {
7854         for (const APInt &R : RHSAAPVS) {
7855           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7856             return indicatePessimisticFixpoint();
7857         }
7858       }
7859     }
7860     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7861                                          : ChangeStatus::CHANGED;
7862   }
7863 
7864   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7865     auto AssumedBefore = getAssumed();
7866     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7867       Value *IncomingValue = PHI->getIncomingValue(u);
7868       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7869           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
7870       if (!PotentialValuesAA.isValidState())
7871         return indicatePessimisticFixpoint();
7872       if (PotentialValuesAA.undefIsContained())
7873         unionAssumedWithUndef();
7874       else
7875         unionAssumed(PotentialValuesAA.getAssumed());
7876     }
7877     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7878                                          : ChangeStatus::CHANGED;
7879   }
7880 
7881   /// See AbstractAttribute::updateImpl(...).
7882   ChangeStatus updateImpl(Attributor &A) override {
7883     Value &V = getAssociatedValue();
7884     Instruction *I = dyn_cast<Instruction>(&V);
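    // Note: initialize() is expected to reach a fixpoint for every value this
    // AA cannot handle, so I should be a non-null instruction whenever this
    // updateImpl is invoked.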
7885 
7886     if (auto *ICI = dyn_cast<ICmpInst>(I))
7887       return updateWithICmpInst(A, ICI);
7888 
7889     if (auto *SI = dyn_cast<SelectInst>(I))
7890       return updateWithSelectInst(A, SI);
7891 
7892     if (auto *CI = dyn_cast<CastInst>(I))
7893       return updateWithCastInst(A, CI);
7894 
7895     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7896       return updateWithBinaryOperator(A, BinOp);
7897 
7898     if (auto *PHI = dyn_cast<PHINode>(I))
7899       return updateWithPHINode(A, PHI);
7900 
7901     return indicatePessimisticFixpoint();
7902   }
7903 
7904   /// See AbstractAttribute::trackStatistics()
7905   void trackStatistics() const override {
7906     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7907   }
7908 };
7909 
7910 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7911   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7912       : AAPotentialValuesImpl(IRP, A) {}
7913 
7914   /// See AbstractAttribute::updateImpl(...).
7915   ChangeStatus updateImpl(Attributor &A) override {
7916     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7917                      "not be called");
7918   }
7919 
7920   /// See AbstractAttribute::trackStatistics()
7921   void trackStatistics() const override {
7922     STATS_DECLTRACK_FN_ATTR(potential_values)
7923   }
7924 };
7925 
7926 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7927   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7928       : AAPotentialValuesFunction(IRP, A) {}
7929 
7930   /// See AbstractAttribute::trackStatistics()
7931   void trackStatistics() const override {
7932     STATS_DECLTRACK_CS_ATTR(potential_values)
7933   }
7934 };
7935 
7936 struct AAPotentialValuesCallSiteReturned
7937     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7938   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7939       : AACallSiteReturnedFromReturned<AAPotentialValues,
7940                                        AAPotentialValuesImpl>(IRP, A) {}
7941 
7942   /// See AbstractAttribute::trackStatistics()
7943   void trackStatistics() const override {
7944     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7945   }
7946 };
7947 
7948 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7949   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7950       : AAPotentialValuesFloating(IRP, A) {}
7951 
7952   /// See AbstractAttribute::initialize(..).
7953   void initialize(Attributor &A) override {
7954     Value &V = getAssociatedValue();
7955 
7956     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7957       unionAssumed(C->getValue());
7958       indicateOptimisticFixpoint();
7959       return;
7960     }
7961 
7962     if (isa<UndefValue>(&V)) {
7963       unionAssumedWithUndef();
7964       indicateOptimisticFixpoint();
7965       return;
7966     }
7967   }
7968 
7969   /// See AbstractAttribute::updateImpl(...).
7970   ChangeStatus updateImpl(Attributor &A) override {
7971     Value &V = getAssociatedValue();
7972     auto AssumedBefore = getAssumed();
7973     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
7974                                              DepClassTy::REQUIRED);
7975     const auto &S = AA.getAssumed();
7976     unionAssumed(S);
7977     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7978                                          : ChangeStatus::CHANGED;
7979   }
7980 
7981   /// See AbstractAttribute::trackStatistics()
7982   void trackStatistics() const override {
7983     STATS_DECLTRACK_CSARG_ATTR(potential_values)
7984   }
7985 };
7986 
7987 /// ------------------------ NoUndef Attribute ---------------------------------
7988 struct AANoUndefImpl : AANoUndef {
7989   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
7990 
7991   /// See AbstractAttribute::initialize(...).
7992   void initialize(Attributor &A) override {
7993     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
7994       indicateOptimisticFixpoint();
7995       return;
7996     }
7997     Value &V = getAssociatedValue();
7998     if (isa<UndefValue>(V))
7999       indicatePessimisticFixpoint();
8000     else if (isa<FreezeInst>(V))
8001       indicateOptimisticFixpoint();
8002     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
8003              isGuaranteedNotToBeUndefOrPoison(&V))
8004       indicateOptimisticFixpoint();
8005     else
8006       AANoUndef::initialize(A);
8007   }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANoUndef::StateType &State) {
    const Value *UseV = U->get();
    const DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (Function *F = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
    }
    State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
    bool TrackUse = false;
    // Track the use for instructions which are guaranteed to produce undef
    // or poison bits whenever at least one operand contains such bits.
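    // For example (hypothetical IR), in
    //   %q = getelementptr inbounds i8, i8* %p, i64 4
    // %q is poison whenever %p is poison, so a use of %q that must be
    // well-defined also tells us something about %p.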
    if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
      TrackUse = true;
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noundef" : "may-undef-or-poison";
  }

  ChangeStatus manifest(Attributor &A) override {
    // We don't manifest the noundef attribute for dead positions because the
    // values associated with dead positions would be replaced with undef.
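    // For example, if the associated instruction is only reachable through
    // blocks assumed dead, its value may later be replaced by undef, which a
    // noundef attribute would contradict.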
    if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
      return ChangeStatus::UNCHANGED;
    // A position whose simplified value does not have any value at all is
    // considered dead as well; we don't manifest noundef for such positions
    // for the same reason as above.
    auto &ValueSimplifyAA =
        A.getAAFor<AAValueSimplify>(*this, getIRPosition(), DepClassTy::NONE);
    if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
      return ChangeStatus::UNCHANGED;
    return AANoUndef::manifest(A);
  }
};

struct AANoUndefFloating : public AANoUndefImpl {
  AANoUndefFloating(const IRPosition &IRP, Attributor &A)
      : AANoUndefImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUndefImpl::initialize(A);
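    // If we haven't reached a fixpoint yet, try to improve the known state
    // based on the uses in the must-be-executed context of the context
    // instruction; see followUseInMBEC above.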
    if (!getState().isAtFixpoint())
      if (Instruction *CtxI = getCtxI())
        followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
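    // Callback for the generic value traversal below; it is invoked for every
    // value (potentially) underlying this position.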
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANoUndef::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        T.indicatePessimisticFixpoint();
      } else {
        const AANoUndef::StateType &S =
            static_cast<const AANoUndef::StateType &>(AA.getState());
        T ^= S;
      }
      return T.isValidState();
    };

    StateType T;
    // Visit all values (potentially) underlying this position; if they cannot
    // all be enumerated we have to be conservative.
    if (!genericValueTraversal<AANoUndef, StateType>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noundef)
  }
};

struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};

struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};

struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};
} // namespace

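// The initialization value of these IDs is irrelevant; the Attributor uses
// their unique addresses to identify the abstract attribute classes.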
const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.
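//
// That is, for an abstract attribute CLASS there is one subclass per
// applicable position kind, named CLASSFunction, CLASSCallSite,
// CLASSFloating, CLASSArgument, CLASSReturned, CLASSCallSiteReturned, or
// CLASSCallSiteArgument, matching the SUFFIX arguments passed to
// SWITCH_PK_CREATE below.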

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }
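
// For illustration, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// below roughly expands to:
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
//     // ... likewise for the other invalid position kinds ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }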

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV