1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AssumeBundleQueries.h"
20 #include "llvm/Analysis/AssumptionCache.h"
21 #include "llvm/Analysis/CaptureTracking.h"
22 #include "llvm/Analysis/LazyValueInfo.h"
23 #include "llvm/Analysis/MemoryBuiltins.h"
24 #include "llvm/Analysis/ScalarEvolution.h"
25 #include "llvm/Analysis/TargetTransformInfo.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/IR/IRBuilder.h"
28 #include "llvm/IR/IntrinsicInst.h"
29 #include "llvm/IR/NoFolder.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 
34 #include <cassert>
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "attributor"
39 
40 static cl::opt<bool> ManifestInternal(
41     "attributor-manifest-internal", cl::Hidden,
42     cl::desc("Manifest Attributor internal string attributes."),
43     cl::init(false));
44 
45 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
46                                        cl::Hidden);
47 
48 template <>
49 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
50 
51 static cl::opt<unsigned, true> MaxPotentialValues(
52     "attributor-max-potential-values", cl::Hidden,
53     cl::desc("Maximum number of potential values to be "
54              "tracked for each position."),
55     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
56     cl::init(7));
57 
58 STATISTIC(NumAAs, "Number of abstract attributes created");
59 
60 // Some helper macros to deal with statistics tracking.
61 //
62 // Usage:
63 // For simple IR attribute tracking overload trackStatistics in the abstract
64 // attribute and choose the right STATS_DECLTRACK_********* macro,
65 // e.g.,:
66 //  void trackStatistics() const override {
67 //    STATS_DECLTRACK_ARG_ATTR(returned)
68 //  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.
72 //
73 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
74   ("Number of " #TYPE " marked '" #NAME "'")
75 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
76 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
77 #define STATS_DECL(NAME, TYPE, MSG)                                            \
78   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
79 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
80 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
81   {                                                                            \
82     STATS_DECL(NAME, TYPE, MSG)                                                \
83     STATS_TRACK(NAME, TYPE)                                                    \
84   }
85 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
86   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
87 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
88   STATS_DECLTRACK(NAME, CSArguments,                                           \
89                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
90 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
91   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
92 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
94 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
95   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
96                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
97 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, CSReturn,                                              \
99                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
100 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
101   STATS_DECLTRACK(NAME, Floating,                                              \
102                   ("Number of floating values known to be '" #NAME "'"))
103 
// Specialization of the operator<< for abstract attribute subclasses. This
105 // disambiguates situations where multiple operators are applicable.
106 namespace llvm {
107 #define PIPE_OPERATOR(CLASS)                                                   \
108   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
109     return OS << static_cast<const AbstractAttribute &>(AA);                   \
110   }
111 
112 PIPE_OPERATOR(AAIsDead)
113 PIPE_OPERATOR(AANoUnwind)
114 PIPE_OPERATOR(AANoSync)
115 PIPE_OPERATOR(AANoRecurse)
116 PIPE_OPERATOR(AAWillReturn)
117 PIPE_OPERATOR(AANoReturn)
118 PIPE_OPERATOR(AAReturnedValues)
119 PIPE_OPERATOR(AANonNull)
120 PIPE_OPERATOR(AANoAlias)
121 PIPE_OPERATOR(AADereferenceable)
122 PIPE_OPERATOR(AAAlign)
123 PIPE_OPERATOR(AANoCapture)
124 PIPE_OPERATOR(AAValueSimplify)
125 PIPE_OPERATOR(AANoFree)
126 PIPE_OPERATOR(AAHeapToStack)
127 PIPE_OPERATOR(AAReachability)
128 PIPE_OPERATOR(AAMemoryBehavior)
129 PIPE_OPERATOR(AAMemoryLocation)
130 PIPE_OPERATOR(AAValueConstantRange)
131 PIPE_OPERATOR(AAPrivatizablePtr)
132 PIPE_OPERATOR(AAUndefinedBehavior)
133 PIPE_OPERATOR(AAPotentialValues)
134 PIPE_OPERATOR(AANoUndef)
135 
136 #undef PIPE_OPERATOR
137 } // namespace llvm
138 
139 namespace {
140 
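/// Return the assumed ConstantInt for \p V based on the Attributor's value
/// simplification. Returns llvm::None if no verdict is available (yet); the
/// contained pointer is null if \p V does not simplify to a ConstantInt.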
141 static Optional<ConstantInt *>
142 getAssumedConstantInt(Attributor &A, const Value &V,
143                       const AbstractAttribute &AA,
144                       bool &UsedAssumedInformation) {
145   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
146   if (C.hasValue())
147     return dyn_cast_or_null<ConstantInt>(C.getValue());
148   return llvm::None;
149 }
150 
/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is false and the instruction is volatile, return nullptr.
154 static const Value *getPointerOperand(const Instruction *I,
155                                       bool AllowVolatile) {
156   if (auto *LI = dyn_cast<LoadInst>(I)) {
157     if (!AllowVolatile && LI->isVolatile())
158       return nullptr;
159     return LI->getPointerOperand();
160   }
161 
162   if (auto *SI = dyn_cast<StoreInst>(I)) {
163     if (!AllowVolatile && SI->isVolatile())
164       return nullptr;
165     return SI->getPointerOperand();
166   }
167 
168   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
169     if (!AllowVolatile && CXI->isVolatile())
170       return nullptr;
171     return CXI->getPointerOperand();
172   }
173 
174   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
175     if (!AllowVolatile && RMWI->isVolatile())
176       return nullptr;
177     return RMWI->getPointerOperand();
178   }
179 
180   return nullptr;
181 }
182 
183 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
184 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
186 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
187 /// through a cast to i8*.
188 ///
/// TODO: This could probably live somewhere more prominently if it doesn't
190 ///       already exist.
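///
/// As an illustrative sketch (types chosen for the example, not taken from
/// the surrounding code): for a \p Ptr of type `{ i32, i32 }*`, an \p Offset
/// of 4, and a \p ResTy of `i32*`, this should produce roughly
///   %p.0.1 = getelementptr { i32, i32 }, { i32, i32 }* %p, i32 0, i32 1
/// and return it directly, since no byte-wise adjustment or final cast is
/// needed.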
191 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
192                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
193   assert(Offset >= 0 && "Negative offset not supported yet!");
194   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
195                     << "-bytes as " << *ResTy << "\n");
196 
197   // The initial type we are trying to traverse to get nice GEPs.
198   Type *Ty = Ptr->getType();
199 
200   SmallVector<Value *, 4> Indices;
201   std::string GEPName = Ptr->getName().str();
202   while (Offset) {
203     uint64_t Idx, Rem;
204 
205     if (auto *STy = dyn_cast<StructType>(Ty)) {
206       const StructLayout *SL = DL.getStructLayout(STy);
207       if (int64_t(SL->getSizeInBytes()) < Offset)
208         break;
209       Idx = SL->getElementContainingOffset(Offset);
210       assert(Idx < STy->getNumElements() && "Offset calculation error!");
211       Rem = Offset - SL->getElementOffset(Idx);
212       Ty = STy->getElementType(Idx);
213     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
214       Ty = PTy->getElementType();
215       if (!Ty->isSized())
216         break;
217       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
218       assert(ElementSize && "Expected type with size!");
219       Idx = Offset / ElementSize;
220       Rem = Offset % ElementSize;
221     } else {
222       // Non-aggregate type, we cast and make byte-wise progress now.
223       break;
224     }
225 
226     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
227                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
228 
229     GEPName += "." + std::to_string(Idx);
230     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
231     Offset = Rem;
232   }
233 
234   // Create a GEP if we collected indices above.
235   if (Indices.size())
236     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
237 
238   // If an offset is left we use byte-wise adjustment.
239   if (Offset) {
240     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
241     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
242                         GEPName + ".b" + Twine(Offset));
243   }
244 
245   // Ensure the result has the requested type.
246   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
247 
248   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
249   return Ptr;
250 }
251 
252 /// Recursively visit all values that might become \p IRP at some point. This
253 /// will be done by looking through cast instructions, selects, phis, and calls
254 /// with the "returned" attribute. Once we cannot look through the value any
255 /// further, the callback \p VisitValueCB is invoked and passed the current
256 /// value, the \p State, and a flag to indicate if we stripped anything.
257 /// Stripped means that we unpacked the value associated with \p IRP at least
258 /// once. Note that the value used for the callback may still be the value
259 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
260 /// we will never visit more values than specified by \p MaxValues.
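///
/// As an illustrative sketch (hypothetical IR, not taken from a test):
/// starting from %r in
///   %s = select i1 %c, i32* %a, i32* %b
///   %r = phi i32* [ %s, %then ], [ %x, %else ]
/// the traversal visits %a, %b, and %x as leaves (assuming both incoming
/// edges are live) and invokes \p VisitValueCB for each with the "stripped"
/// flag set.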
261 template <typename AAType, typename StateTy>
262 static bool genericValueTraversal(
263     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
264     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
265         VisitValueCB,
266     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
267     function_ref<Value *(Value *)> StripCB = nullptr) {
268 
269   const AAIsDead *LivenessAA = nullptr;
270   if (IRP.getAnchorScope())
271     LivenessAA = &A.getAAFor<AAIsDead>(
272         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
273         /* TrackDependence */ false);
274   bool AnyDead = false;
275 
276   using Item = std::pair<Value *, const Instruction *>;
277   SmallSet<Item, 16> Visited;
278   SmallVector<Item, 16> Worklist;
279   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
280 
281   int Iteration = 0;
282   do {
283     Item I = Worklist.pop_back_val();
284     Value *V = I.first;
285     CtxI = I.second;
286     if (StripCB)
287       V = StripCB(V);
288 
289     // Check if we should process the current value. To prevent endless
290     // recursion keep a record of the values we followed!
291     if (!Visited.insert(I).second)
292       continue;
293 
294     // Make sure we limit the compile time for complex expressions.
295     if (Iteration++ >= MaxValues)
296       return false;
297 
    // Explicitly look through calls with a "returned" argument attribute if
    // we do not have a pointer, as stripPointerCasts only works on pointers.
300     Value *NewV = nullptr;
301     if (V->getType()->isPointerTy()) {
302       NewV = V->stripPointerCasts();
303     } else {
304       auto *CB = dyn_cast<CallBase>(V);
305       if (CB && CB->getCalledFunction()) {
306         for (Argument &Arg : CB->getCalledFunction()->args())
307           if (Arg.hasReturnedAttr()) {
308             NewV = CB->getArgOperand(Arg.getArgNo());
309             break;
310           }
311       }
312     }
313     if (NewV && NewV != V) {
314       Worklist.push_back({NewV, CtxI});
315       continue;
316     }
317 
318     // Look through select instructions, visit both potential values.
319     if (auto *SI = dyn_cast<SelectInst>(V)) {
320       Worklist.push_back({SI->getTrueValue(), CtxI});
321       Worklist.push_back({SI->getFalseValue(), CtxI});
322       continue;
323     }
324 
325     // Look through phi nodes, visit all live operands.
326     if (auto *PHI = dyn_cast<PHINode>(V)) {
327       assert(LivenessAA &&
328              "Expected liveness in the presence of instructions!");
329       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
330         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
331         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
332                             LivenessAA,
333                             /* CheckBBLivenessOnly */ true)) {
334           AnyDead = true;
335           continue;
336         }
337         Worklist.push_back(
338             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
339       }
340       continue;
341     }
342 
343     if (UseValueSimplify && !isa<Constant>(V)) {
344       bool UsedAssumedInformation = false;
345       Optional<Constant *> C =
346           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
347       if (!C.hasValue())
348         continue;
349       if (Value *NewV = C.getValue()) {
350         Worklist.push_back({NewV, CtxI});
351         continue;
352       }
353     }
354 
355     // Once a leaf is reached we inform the user through the callback.
356     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
357       return false;
358   } while (!Worklist.empty());
359 
  // If we actually used liveness information, we have to record a dependence.
361   if (AnyDead)
362     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
363 
364   // All values have been visited.
365   return true;
366 }
367 
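/// Strip constant offsets from \p Val and accumulate a conservative (minimal)
/// byte offset into \p Offset. Non-constant GEP indices are handled by
/// querying AAValueConstantRange and taking the signed minimum of the known
/// (or, if \p UseAssumed is set, the assumed) range.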
368 const Value *stripAndAccumulateMinimalOffsets(
369     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
370     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
371     bool UseAssumed = false) {
372 
373   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
374     const IRPosition &Pos = IRPosition::value(V);
375     // Only track dependence if we are going to use the assumed info.
376     const AAValueConstantRange &ValueConstantRangeAA =
377         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
378                                          /* TrackDependence */ UseAssumed);
379     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
380                                      : ValueConstantRangeAA.getKnown();
381     // We can only use the lower part of the range because the upper part can
382     // be higher than what the value can really be.
383     ROffset = Range.getSignedMin();
384     return true;
385   };
386 
387   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
388                                                 AttributorAnalysis);
389 }
390 
391 static const Value *getMinimalBaseOfAccsesPointerOperand(
392     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
393     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
394   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
395   if (!Ptr)
396     return nullptr;
397   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
398   const Value *Base = stripAndAccumulateMinimalOffsets(
399       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
400 
401   BytesOffset = OffsetAPInt.getSExtValue();
402   return Base;
403 }
404 
405 static const Value *
406 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
407                                      const DataLayout &DL,
408                                      bool AllowNonInbounds = false) {
409   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
410   if (!Ptr)
411     return nullptr;
412 
413   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
414                                           AllowNonInbounds);
415 }
416 
417 /// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, an
/// update is required to be run again).
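///
/// A typical use is to clamp a call-site attribute's state with the state of
/// the corresponding function attribute, e.g.,
///   return clampStateAndIndicateChange(getState(), FnAA.getState());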
420 template <typename StateType>
421 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
422   auto Assumed = S.getAssumed();
423   S ^= R;
424   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
425                                    : ChangeStatus::CHANGED;
426 }
427 
428 /// Clamp the information known for all returned values of a function
429 /// (identified by \p QueryingAA) into \p S.
430 template <typename AAType, typename StateType = typename AAType::StateType>
431 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
432                                      StateType &S) {
433   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
434                     << QueryingAA << " into " << S << "\n");
435 
436   assert((QueryingAA.getIRPosition().getPositionKind() ==
437               IRPosition::IRP_RETURNED ||
438           QueryingAA.getIRPosition().getPositionKind() ==
439               IRPosition::IRP_CALL_SITE_RETURNED) &&
440          "Can only clamp returned value states for a function returned or call "
441          "site returned position!");
442 
  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all those that exist.
445   Optional<StateType> T;
446 
447   // Callback for each possibly returned value.
448   auto CheckReturnValue = [&](Value &RV) -> bool {
449     const IRPosition &RVPos = IRPosition::value(RV);
450     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
451     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
452                       << " @ " << RVPos << "\n");
453     const StateType &AAS = AA.getState();
454     if (T.hasValue())
455       *T &= AAS;
456     else
457       T = AAS;
458     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
459                       << "\n");
460     return T->isValidState();
461   };
462 
463   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
464     S.indicatePessimisticFixpoint();
465   else if (T.hasValue())
466     S ^= *T;
467 }
468 
469 /// Helper class for generic deduction: return value -> returned position.
470 template <typename AAType, typename BaseType,
471           typename StateType = typename BaseType::StateType>
472 struct AAReturnedFromReturnedValues : public BaseType {
473   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
474       : BaseType(IRP, A) {}
475 
476   /// See AbstractAttribute::updateImpl(...).
477   ChangeStatus updateImpl(Attributor &A) override {
478     StateType S(StateType::getBestState(this->getState()));
479     clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
482     return clampStateAndIndicateChange<StateType>(this->getState(), S);
483   }
484 };
485 
486 /// Clamp the information known at all call sites for a given argument
487 /// (identified by \p QueryingAA) into \p S.
488 template <typename AAType, typename StateType = typename AAType::StateType>
489 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
490                                         StateType &S) {
491   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
492                     << QueryingAA << " into " << S << "\n");
493 
494   assert(QueryingAA.getIRPosition().getPositionKind() ==
495              IRPosition::IRP_ARGUMENT &&
496          "Can only clamp call site argument states for an argument position!");
497 
  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the states of all those that exist.
500   Optional<StateType> T;
501 
502   // The argument number which is also the call site argument number.
503   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
504 
505   auto CallSiteCheck = [&](AbstractCallSite ACS) {
506     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
509     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
510       return false;
511 
512     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
513     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
514                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
515     const StateType &AAS = AA.getState();
516     if (T.hasValue())
517       *T &= AAS;
518     else
519       T = AAS;
520     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
521                       << "\n");
522     return T->isValidState();
523   };
524 
525   bool AllCallSitesKnown;
526   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
527                               AllCallSitesKnown))
528     S.indicatePessimisticFixpoint();
529   else if (T.hasValue())
530     S ^= *T;
531 }
532 
533 /// Helper class for generic deduction: call site argument -> argument position.
534 template <typename AAType, typename BaseType,
535           typename StateType = typename AAType::StateType>
536 struct AAArgumentFromCallSiteArguments : public BaseType {
537   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
538       : BaseType(IRP, A) {}
539 
540   /// See AbstractAttribute::updateImpl(...).
541   ChangeStatus updateImpl(Attributor &A) override {
542     StateType S(StateType::getBestState(this->getState()));
543     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
546     return clampStateAndIndicateChange<StateType>(this->getState(), S);
547   }
548 };
549 
550 /// Helper class for generic replication: function returned -> cs returned.
551 template <typename AAType, typename BaseType,
552           typename StateType = typename BaseType::StateType>
553 struct AACallSiteReturnedFromReturned : public BaseType {
554   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
555       : BaseType(IRP, A) {}
556 
557   /// See AbstractAttribute::updateImpl(...).
558   ChangeStatus updateImpl(Attributor &A) override {
559     assert(this->getIRPosition().getPositionKind() ==
560                IRPosition::IRP_CALL_SITE_RETURNED &&
561            "Can only wrap function returned positions for call site returned "
562            "positions!");
563     auto &S = this->getState();
564 
565     const Function *AssociatedFunction =
566         this->getIRPosition().getAssociatedFunction();
567     if (!AssociatedFunction)
568       return S.indicatePessimisticFixpoint();
569 
570     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
571     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
572     return clampStateAndIndicateChange(S, AA.getState());
573   }
574 };
575 
576 /// Helper function to accumulate uses.
577 template <class AAType, typename StateType = typename AAType::StateType>
578 static void followUsesInContext(AAType &AA, Attributor &A,
579                                 MustBeExecutedContextExplorer &Explorer,
580                                 const Instruction *CtxI,
581                                 SetVector<const Use *> &Uses,
582                                 StateType &State) {
583   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
584   for (unsigned u = 0; u < Uses.size(); ++u) {
585     const Use *U = Uses[u];
586     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
587       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
588       if (Found && AA.followUseInMBEC(A, U, UserI, State))
589         for (const Use &Us : UserI->uses())
590           Uses.insert(&Us);
591     }
592   }
593 }
594 
595 /// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of \p U.
602 /// Returns true if the value should be tracked transitively.
603 ///
604 template <class AAType, typename StateType = typename AAType::StateType>
605 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
606                              Instruction &CtxI) {
607 
608   // Container for (transitive) uses of the associated value.
609   SetVector<const Use *> Uses;
610   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
611     Uses.insert(&U);
612 
613   MustBeExecutedContextExplorer &Explorer =
614       A.getInfoCache().getMustBeExecutedContextExplorer();
615 
616   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
617 
618   if (S.isAtFixpoint())
619     return;
620 
621   SmallVector<const BranchInst *, 4> BrInsts;
622   auto Pred = [&](const Instruction *I) {
623     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
624       if (Br->isConditional())
625         BrInsts.push_back(Br);
626     return true;
627   };
628 
  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect their known states. The disjunction
  // of those states can then be merged into the state we are deducing. Let
  // ParentState_i be the state describing the known information for the i-th
  // branch instruction in the context. ChildStates are created for its
  // successors respectively.
634   //
635   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
636   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
637   //      ...
638   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
639   //
640   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
641   //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
646   //    if(a)
647   //      if (b) {
648   //        *ptr = 0;
649   //      } else {
650   //        *ptr = 1;
651   //      }
652   //    else {
653   //      if (b) {
654   //        *ptr = 0;
655   //      } else {
656   //        *ptr = 1;
657   //      }
658   //    }
659   // }
660 
661   Explorer.checkForAllContext(&CtxI, Pred);
662   for (const BranchInst *Br : BrInsts) {
663     StateType ParentState;
664 
    // The known state of the parent is the conjunction of the children's
    // known states, so it is initialized to the best state.
667     ParentState.indicateOptimisticFixpoint();
668 
669     for (const BasicBlock *BB : Br->successors()) {
670       StateType ChildState;
671 
672       size_t BeforeSize = Uses.size();
673       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
674 
675       // Erase uses which only appear in the child.
676       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
677         It = Uses.erase(It);
678 
679       ParentState &= ChildState;
680     }
681 
682     // Use only known state.
683     S += ParentState;
684   }
685 }
686 
687 /// -----------------------NoUnwind Function Attribute--------------------------
688 
689 struct AANoUnwindImpl : AANoUnwind {
690   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
691 
692   const std::string getAsStr() const override {
693     return getAssumed() ? "nounwind" : "may-unwind";
694   }
695 
696   /// See AbstractAttribute::updateImpl(...).
697   ChangeStatus updateImpl(Attributor &A) override {
698     auto Opcodes = {
699         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
700         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
701         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
702 
703     auto CheckForNoUnwind = [&](Instruction &I) {
704       if (!I.mayThrow())
705         return true;
706 
707       if (const auto *CB = dyn_cast<CallBase>(&I)) {
708         const auto &NoUnwindAA =
709             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
710         return NoUnwindAA.isAssumedNoUnwind();
711       }
712       return false;
713     };
714 
715     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
716       return indicatePessimisticFixpoint();
717 
718     return ChangeStatus::UNCHANGED;
719   }
720 };
721 
722 struct AANoUnwindFunction final : public AANoUnwindImpl {
723   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
724       : AANoUnwindImpl(IRP, A) {}
725 
726   /// See AbstractAttribute::trackStatistics()
727   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
728 };
729 
/// NoUnwind attribute deduction for a call site.
731 struct AANoUnwindCallSite final : AANoUnwindImpl {
732   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
733       : AANoUnwindImpl(IRP, A) {}
734 
735   /// See AbstractAttribute::initialize(...).
736   void initialize(Attributor &A) override {
737     AANoUnwindImpl::initialize(A);
738     Function *F = getAssociatedFunction();
739     if (!F || F->isDeclaration())
740       indicatePessimisticFixpoint();
741   }
742 
743   /// See AbstractAttribute::updateImpl(...).
744   ChangeStatus updateImpl(Attributor &A) override {
745     // TODO: Once we have call site specific value information we can provide
746     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
748     //       redirecting requests to the callee argument.
749     Function *F = getAssociatedFunction();
750     const IRPosition &FnPos = IRPosition::function(*F);
751     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
752     return clampStateAndIndicateChange(getState(), FnAA.getState());
753   }
754 
755   /// See AbstractAttribute::trackStatistics()
756   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
757 };
758 
759 /// --------------------- Function Return Values -------------------------------
760 
761 /// "Attribute" that collects all potential returned values and the return
762 /// instructions that they arise from.
763 ///
764 /// If there is a unique returned value R, the manifest method will:
765 ///   - mark R with the "returned" attribute, if R is an argument.
766 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
767 
768   /// Mapping of values potentially returned by the associated function to the
769   /// return instructions that might return them.
770   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
771 
772   /// Mapping to remember the number of returned values for a call site such
773   /// that we can avoid updates if nothing changed.
774   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
775 
776   /// Set of unresolved calls returned by the associated function.
777   SmallSetVector<CallBase *, 4> UnresolvedCalls;
778 
779   /// State flags
780   ///
781   ///{
782   bool IsFixed = false;
783   bool IsValidState = true;
784   ///}
785 
786 public:
787   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
788       : AAReturnedValues(IRP, A) {}
789 
790   /// See AbstractAttribute::initialize(...).
791   void initialize(Attributor &A) override {
792     // Reset the state.
793     IsFixed = false;
794     IsValidState = true;
795     ReturnedValues.clear();
796 
797     Function *F = getAssociatedFunction();
798     if (!F || F->isDeclaration()) {
799       indicatePessimisticFixpoint();
800       return;
801     }
802     assert(!F->getReturnType()->isVoidTy() &&
803            "Did not expect a void return type!");
804 
805     // The map from instruction opcodes to those instructions in the function.
806     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
807 
808     // Look through all arguments, if one is marked as returned we are done.
809     for (Argument &Arg : F->args()) {
810       if (Arg.hasReturnedAttr()) {
811         auto &ReturnInstSet = ReturnedValues[&Arg];
812         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
813           for (Instruction *RI : *Insts)
814             ReturnInstSet.insert(cast<ReturnInst>(RI));
815 
816         indicateOptimisticFixpoint();
817         return;
818       }
819     }
820 
821     if (!A.isFunctionIPOAmendable(*F))
822       indicatePessimisticFixpoint();
823   }
824 
825   /// See AbstractAttribute::manifest(...).
826   ChangeStatus manifest(Attributor &A) override;
827 
828   /// See AbstractAttribute::getState(...).
829   AbstractState &getState() override { return *this; }
830 
831   /// See AbstractAttribute::getState(...).
832   const AbstractState &getState() const override { return *this; }
833 
834   /// See AbstractAttribute::updateImpl(Attributor &A).
835   ChangeStatus updateImpl(Attributor &A) override;
836 
837   llvm::iterator_range<iterator> returned_values() override {
838     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
839   }
840 
841   llvm::iterator_range<const_iterator> returned_values() const override {
842     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
843   }
844 
845   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
846     return UnresolvedCalls;
847   }
848 
849   /// Return the number of potential return values, -1 if unknown.
850   size_t getNumReturnValues() const override {
851     return isValidState() ? ReturnedValues.size() : -1;
852   }
853 
854   /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return
  /// llvm::None.
857   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
858 
859   /// See AbstractState::checkForAllReturnedValues(...).
860   bool checkForAllReturnedValuesAndReturnInsts(
861       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
862       const override;
863 
864   /// Pretty print the attribute similar to the IR representation.
865   const std::string getAsStr() const override;
866 
867   /// See AbstractState::isAtFixpoint().
868   bool isAtFixpoint() const override { return IsFixed; }
869 
870   /// See AbstractState::isValidState().
871   bool isValidState() const override { return IsValidState; }
872 
873   /// See AbstractState::indicateOptimisticFixpoint(...).
874   ChangeStatus indicateOptimisticFixpoint() override {
875     IsFixed = true;
876     return ChangeStatus::UNCHANGED;
877   }
878 
879   ChangeStatus indicatePessimisticFixpoint() override {
880     IsFixed = true;
881     IsValidState = false;
882     return ChangeStatus::CHANGED;
883   }
884 };
885 
886 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
887   ChangeStatus Changed = ChangeStatus::UNCHANGED;
888 
889   // Bookkeeping.
890   assert(isValidState());
891   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
892                   "Number of function with known return values");
893 
894   // Check if we have an assumed unique return value that we could manifest.
895   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
896 
897   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
898     return Changed;
899 
900   // Bookkeeping.
901   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
902                   "Number of function with unique return");
903 
904   // Callback to replace the uses of CB with the constant C.
905   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
906     if (CB.use_empty())
907       return ChangeStatus::UNCHANGED;
908     if (A.changeValueAfterManifest(CB, C))
909       return ChangeStatus::CHANGED;
910     return ChangeStatus::UNCHANGED;
911   };
912 
913   // If the assumed unique return value is an argument, annotate it.
914   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
915     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
916             getAssociatedFunction()->getReturnType())) {
917       getIRPosition() = IRPosition::argument(*UniqueRVArg);
918       Changed = IRAttribute::manifest(A);
919     }
920   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
921     // We can replace the returned value with the unique returned constant.
922     Value &AnchorValue = getAnchorValue();
923     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
924       for (const Use &U : F->uses())
925         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
926           if (CB->isCallee(&U)) {
927             Constant *RVCCast =
928                 CB->getType() == RVC->getType()
929                     ? RVC
930                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
931             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
932           }
933     } else {
934       assert(isa<CallBase>(AnchorValue) &&
935              "Expcected a function or call base anchor!");
936       Constant *RVCCast =
937           AnchorValue.getType() == RVC->getType()
938               ? RVC
939               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
940       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
941     }
942     if (Changed == ChangeStatus::CHANGED)
943       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
944                       "Number of function returns replaced by constant return");
945   }
946 
947   return Changed;
948 }
949 
950 const std::string AAReturnedValuesImpl::getAsStr() const {
951   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
952          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
953          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
954 }
955 
956 Optional<Value *>
957 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
958   // If checkForAllReturnedValues provides a unique value, ignoring potential
959   // undef values that can also be present, it is assumed to be the actual
960   // return value and forwarded to the caller of this method. If there are
961   // multiple, a nullptr is returned indicating there cannot be a unique
962   // returned value.
963   Optional<Value *> UniqueRV;
964 
965   auto Pred = [&](Value &RV) -> bool {
966     // If we found a second returned value and neither the current nor the saved
967     // one is an undef, there is no unique returned value. Undefs are special
968     // since we can pretend they have any value.
969     if (UniqueRV.hasValue() && UniqueRV != &RV &&
970         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
971       UniqueRV = nullptr;
972       return false;
973     }
974 
975     // Do not overwrite a value with an undef.
976     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
977       UniqueRV = &RV;
978 
979     return true;
980   };
981 
982   if (!A.checkForAllReturnedValues(Pred, *this))
983     UniqueRV = nullptr;
984 
985   return UniqueRV;
986 }
987 
988 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
989     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
990     const {
991   if (!isValidState())
992     return false;
993 
994   // Check all returned values but ignore call sites as long as we have not
995   // encountered an overdefined one during an update.
996   for (auto &It : ReturnedValues) {
997     Value *RV = It.first;
998 
999     CallBase *CB = dyn_cast<CallBase>(RV);
1000     if (CB && !UnresolvedCalls.count(CB))
1001       continue;
1002 
1003     if (!Pred(*RV, It.second))
1004       return false;
1005   }
1006 
1007   return true;
1008 }
1009 
1010 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1011   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1012   bool Changed = false;
1013 
1014   // State used in the value traversals starting in returned values.
1015   struct RVState {
1016     // The map in which we collect return values -> return instrs.
1017     decltype(ReturnedValues) &RetValsMap;
1018     // The flag to indicate a change.
1019     bool &Changed;
1020     // The return instrs we come from.
1021     SmallSetVector<ReturnInst *, 4> RetInsts;
1022   };
1023 
1024   // Callback for a leaf value returned by the associated function.
1025   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1026                          bool) -> bool {
1027     auto Size = RVS.RetValsMap[&Val].size();
1028     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1029     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1030     RVS.Changed |= Inserted;
1031     LLVM_DEBUG({
1032       if (Inserted)
1033         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1034                << " => " << RVS.RetInsts.size() << "\n";
1035     });
1036     return true;
1037   };
1038 
1039   // Helper method to invoke the generic value traversal.
1040   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1041                                 const Instruction *CtxI) {
1042     IRPosition RetValPos = IRPosition::value(RV);
1043     return genericValueTraversal<AAReturnedValues, RVState>(
1044         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1045         /* UseValueSimplify */ false);
1046   };
1047 
1048   // Callback for all "return intructions" live in the associated function.
1049   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1050     ReturnInst &Ret = cast<ReturnInst>(I);
1051     RVState RVS({ReturnedValues, Changed, {}});
1052     RVS.RetInsts.insert(&Ret);
1053     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1054   };
1055 
  // Start by discovering returned values from all live return instructions in
1057   // the associated function.
1058   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1059     return indicatePessimisticFixpoint();
1060 
1061   // Once returned values "directly" present in the code are handled we try to
1062   // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it we keep a record of potential new entries in a
  // separate map, NewRVsMap.
1065   decltype(ReturnedValues) NewRVsMap;
1066 
1067   auto HandleReturnValue = [&](Value *RV,
1068                                SmallSetVector<ReturnInst *, 4> &RIs) {
1069     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1070                       << RIs.size() << " RIs\n");
1071     CallBase *CB = dyn_cast<CallBase>(RV);
1072     if (!CB || UnresolvedCalls.count(CB))
1073       return;
1074 
1075     if (!CB->getCalledFunction()) {
1076       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1077                         << "\n");
1078       UnresolvedCalls.insert(CB);
1079       return;
1080     }
1081 
1082     // TODO: use the function scope once we have call site AAReturnedValues.
1083     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1084         *this, IRPosition::function(*CB->getCalledFunction()));
1085     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1086                       << RetValAA << "\n");
1087 
    // Skip dead ends: if we do not know anything about the returned call we
    // mark it as unresolved and it will stay that way.
1090     if (!RetValAA.getState().isValidState()) {
1091       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1092                         << "\n");
1093       UnresolvedCalls.insert(CB);
1094       return;
1095     }
1096 
1097     // Do not try to learn partial information. If the callee has unresolved
1098     // return values we will treat the call as unresolved/opaque.
1099     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1100     if (!RetValAAUnresolvedCalls.empty()) {
1101       UnresolvedCalls.insert(CB);
1102       return;
1103     }
1104 
    // Now check if we can track transitively returned values. If possible,
    // that is, if all returned values can be represented in the current
    // scope, do so.
1107     bool Unresolved = false;
1108     for (auto &RetValAAIt : RetValAA.returned_values()) {
1109       Value *RetVal = RetValAAIt.first;
1110       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1111           isa<Constant>(RetVal))
1112         continue;
1113       // Anything that did not fit in the above categories cannot be resolved,
1114       // mark the call as unresolved.
1115       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1116                            "cannot be translated: "
1117                         << *RetVal << "\n");
1118       UnresolvedCalls.insert(CB);
1119       Unresolved = true;
1120       break;
1121     }
1122 
1123     if (Unresolved)
1124       return;
1125 
1126     // Now track transitively returned values.
1127     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1128     if (NumRetAA == RetValAA.getNumReturnValues()) {
1129       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1130                            "changed since it was seen last\n");
1131       return;
1132     }
1133     NumRetAA = RetValAA.getNumReturnValues();
1134 
1135     for (auto &RetValAAIt : RetValAA.returned_values()) {
1136       Value *RetVal = RetValAAIt.first;
1137       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1138         // Arguments are mapped to call site operands and we begin the traversal
1139         // again.
1140         bool Unused = false;
1141         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1142         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1143         continue;
1144       }
1145       if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time; there is
        // nothing for us to do here.
1148         continue;
1149       }
1150       if (isa<Constant>(RetVal)) {
1151         // Constants are valid everywhere, we can simply take them.
1152         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1153         continue;
1154       }
1155     }
1156   };
1157 
1158   for (auto &It : ReturnedValues)
1159     HandleReturnValue(It.first, It.second);
1160 
  // Because processing the new information can again lead to new return values
  // we have to be careful and iterate until this update is complete. The idea
  // is that we are in a stable state at the end of an update. All return
  // values have been handled and properly categorized. We might not update
  // again if we have not requested a non-fixed attribute, so we cannot "wait"
  // for the next update to analyze a new return value.
1167   while (!NewRVsMap.empty()) {
1168     auto It = std::move(NewRVsMap.back());
1169     NewRVsMap.pop_back();
1170 
1171     assert(!It.second.empty() && "Entry does not add anything.");
1172     auto &ReturnInsts = ReturnedValues[It.first];
1173     for (ReturnInst *RI : It.second)
1174       if (ReturnInsts.insert(RI)) {
1175         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1176                           << *It.first << " => " << *RI << "\n");
1177         HandleReturnValue(It.first, ReturnInsts);
1178         Changed = true;
1179       }
1180   }
1181 
1182   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1183   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1184 }
1185 
1186 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1187   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1188       : AAReturnedValuesImpl(IRP, A) {}
1189 
1190   /// See AbstractAttribute::trackStatistics()
1191   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1192 };
1193 
/// Returned values information for a call site.
1195 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1196   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1197       : AAReturnedValuesImpl(IRP, A) {}
1198 
1199   /// See AbstractAttribute::initialize(...).
1200   void initialize(Attributor &A) override {
1201     // TODO: Once we have call site specific value information we can provide
1202     //       call site specific liveness information and then it makes
1203     //       sense to specialize attributes for call sites instead of
1204     //       redirecting requests to the callee.
1205     llvm_unreachable("Abstract attributes for returned values are not "
1206                      "supported for call sites yet!");
1207   }
1208 
1209   /// See AbstractAttribute::updateImpl(...).
1210   ChangeStatus updateImpl(Attributor &A) override {
1211     return indicatePessimisticFixpoint();
1212   }
1213 
1214   /// See AbstractAttribute::trackStatistics()
1215   void trackStatistics() const override {}
1216 };
1217 
1218 /// ------------------------ NoSync Function Attribute -------------------------
1219 
1220 struct AANoSyncImpl : AANoSync {
1221   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1222 
1223   const std::string getAsStr() const override {
1224     return getAssumed() ? "nosync" : "may-sync";
1225   }
1226 
1227   /// See AbstractAttribute::updateImpl(...).
1228   ChangeStatus updateImpl(Attributor &A) override;
1229 
  /// Helper function used to determine whether an instruction is a non-relaxed
  /// atomic, i.e., an atomic instruction that does not have unordered or
  /// monotonic ordering.
1233   static bool isNonRelaxedAtomic(Instruction *I);
1234 
1235   /// Helper function used to determine whether an instruction is volatile.
1236   static bool isVolatile(Instruction *I);
1237 
  /// Helper function used to check whether an intrinsic is nosync. Currently
  /// only mem* intrinsics (memcpy, memmove, memset) and their element-wise
  /// unordered atomic variants are handled.
1240   static bool isNoSyncIntrinsic(Instruction *I);
1241 };
1242 
1243 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1244   if (!I->isAtomic())
1245     return false;
1246 
1247   AtomicOrdering Ordering;
1248   switch (I->getOpcode()) {
1249   case Instruction::AtomicRMW:
1250     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1251     break;
1252   case Instruction::Store:
1253     Ordering = cast<StoreInst>(I)->getOrdering();
1254     break;
1255   case Instruction::Load:
1256     Ordering = cast<LoadInst>(I)->getOrdering();
1257     break;
1258   case Instruction::Fence: {
1259     auto *FI = cast<FenceInst>(I);
1260     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1261       return false;
1262     Ordering = FI->getOrdering();
1263     break;
1264   }
1265   case Instruction::AtomicCmpXchg: {
1266     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1267     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the instruction be treated as
    // relaxed; otherwise it is non-relaxed.
1270     if (Success != AtomicOrdering::Unordered &&
1271         Success != AtomicOrdering::Monotonic)
1272       return true;
1273     if (Failure != AtomicOrdering::Unordered &&
1274         Failure != AtomicOrdering::Monotonic)
1275       return true;
1276     return false;
1277   }
1278   default:
1279     llvm_unreachable(
1280         "New atomic operations need to be known in the attributor.");
1281   }
1282 
1283   // Relaxed.
1284   if (Ordering == AtomicOrdering::Unordered ||
1285       Ordering == AtomicOrdering::Monotonic)
1286     return false;
1287   return true;
1288 }
1289 
1290 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
1292 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1293   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1294     switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
1297     case Intrinsic::memset_element_unordered_atomic:
1298     case Intrinsic::memmove_element_unordered_atomic:
1299     case Intrinsic::memcpy_element_unordered_atomic:
1300       return true;
1301     case Intrinsic::memset:
1302     case Intrinsic::memmove:
1303     case Intrinsic::memcpy:
1304       if (!cast<MemIntrinsic>(II)->isVolatile())
1305         return true;
1306       return false;
1307     default:
1308       return false;
1309     }
1310   }
1311   return false;
1312 }
1313 
1314 bool AANoSyncImpl::isVolatile(Instruction *I) {
1315   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1316 
1317   switch (I->getOpcode()) {
1318   case Instruction::AtomicRMW:
1319     return cast<AtomicRMWInst>(I)->isVolatile();
1320   case Instruction::Store:
1321     return cast<StoreInst>(I)->isVolatile();
1322   case Instruction::Load:
1323     return cast<LoadInst>(I)->isVolatile();
1324   case Instruction::AtomicCmpXchg:
1325     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1326   default:
1327     return false;
1328   }
1329 }
1330 
1331 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1332 
1333   auto CheckRWInstForNoSync = [&](Instruction &I) {
1334     /// We are looking for volatile instructions or Non-Relaxed atomics.
1335     /// FIXME: We should improve the handling of intrinsics.
1336 
1337     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1338       return true;
1339 
1340     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1341       if (CB->hasFnAttr(Attribute::NoSync))
1342         return true;
1343 
1344       const auto &NoSyncAA =
1345           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
1346       if (NoSyncAA.isAssumedNoSync())
1347         return true;
1348       return false;
1349     }
1350 
1351     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1352       return true;
1353 
1354     return false;
1355   };
1356 
1357   auto CheckForNoSync = [&](Instruction &I) {
1358     // At this point we handled all read/write effects and they are all
1359     // nosync, so they can be skipped.
1360     if (I.mayReadOrWriteMemory())
1361       return true;
1362 
1363     // non-convergent and readnone imply nosync.
1364     return !cast<CallBase>(I).isConvergent();
1365   };
1366 
1367   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1368       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1369     return indicatePessimisticFixpoint();
1370 
1371   return ChangeStatus::UNCHANGED;
1372 }
1373 
1374 struct AANoSyncFunction final : public AANoSyncImpl {
1375   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1376       : AANoSyncImpl(IRP, A) {}
1377 
1378   /// See AbstractAttribute::trackStatistics()
1379   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1380 };
1381 
/// NoSync attribute deduction for a call site.
1383 struct AANoSyncCallSite final : AANoSyncImpl {
1384   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1385       : AANoSyncImpl(IRP, A) {}
1386 
1387   /// See AbstractAttribute::initialize(...).
1388   void initialize(Attributor &A) override {
1389     AANoSyncImpl::initialize(A);
1390     Function *F = getAssociatedFunction();
1391     if (!F || F->isDeclaration())
1392       indicatePessimisticFixpoint();
1393   }
1394 
1395   /// See AbstractAttribute::updateImpl(...).
1396   ChangeStatus updateImpl(Attributor &A) override {
1397     // TODO: Once we have call site specific value information we can provide
1398     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1400     //       redirecting requests to the callee argument.
1401     Function *F = getAssociatedFunction();
1402     const IRPosition &FnPos = IRPosition::function(*F);
1403     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1404     return clampStateAndIndicateChange(getState(), FnAA.getState());
1405   }
1406 
1407   /// See AbstractAttribute::trackStatistics()
1408   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1409 };
1410 
1411 /// ------------------------ No-Free Attributes ----------------------------
1412 
1413 struct AANoFreeImpl : public AANoFree {
1414   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1415 
1416   /// See AbstractAttribute::updateImpl(...).
1417   ChangeStatus updateImpl(Attributor &A) override {
1418     auto CheckForNoFree = [&](Instruction &I) {
1419       const auto &CB = cast<CallBase>(I);
1420       if (CB.hasFnAttr(Attribute::NoFree))
1421         return true;
1422 
1423       const auto &NoFreeAA =
1424           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1425       return NoFreeAA.isAssumedNoFree();
1426     };
1427 
1428     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1429       return indicatePessimisticFixpoint();
1430     return ChangeStatus::UNCHANGED;
1431   }
1432 
1433   /// See AbstractAttribute::getAsStr().
1434   const std::string getAsStr() const override {
1435     return getAssumed() ? "nofree" : "may-free";
1436   }
1437 };
1438 
1439 struct AANoFreeFunction final : public AANoFreeImpl {
1440   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1441       : AANoFreeImpl(IRP, A) {}
1442 
1443   /// See AbstractAttribute::trackStatistics()
1444   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1445 };
1446 
/// NoFree attribute deduction for a call site.
1448 struct AANoFreeCallSite final : AANoFreeImpl {
1449   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1450       : AANoFreeImpl(IRP, A) {}
1451 
1452   /// See AbstractAttribute::initialize(...).
1453   void initialize(Attributor &A) override {
1454     AANoFreeImpl::initialize(A);
1455     Function *F = getAssociatedFunction();
1456     if (!F || F->isDeclaration())
1457       indicatePessimisticFixpoint();
1458   }
1459 
1460   /// See AbstractAttribute::updateImpl(...).
1461   ChangeStatus updateImpl(Attributor &A) override {
1462     // TODO: Once we have call site specific value information we can provide
1463     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1465     //       redirecting requests to the callee argument.
1466     Function *F = getAssociatedFunction();
1467     const IRPosition &FnPos = IRPosition::function(*F);
1468     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1469     return clampStateAndIndicateChange(getState(), FnAA.getState());
1470   }
1471 
1472   /// See AbstractAttribute::trackStatistics()
1473   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1474 };
1475 
1476 /// NoFree attribute for floating values.
1477 struct AANoFreeFloating : AANoFreeImpl {
1478   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1479       : AANoFreeImpl(IRP, A) {}
1480 
1481   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }
1483 
  /// See AbstractAttribute::updateImpl(...).
1485   ChangeStatus updateImpl(Attributor &A) override {
1486     const IRPosition &IRP = getIRPosition();
1487 
1488     const auto &NoFreeAA =
1489         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1490     if (NoFreeAA.isAssumedNoFree())
1491       return ChangeStatus::UNCHANGED;
1492 
1493     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1494     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1495       Instruction *UserI = cast<Instruction>(U.getUser());
1496       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1497         if (CB->isBundleOperand(&U))
1498           return false;
1499         if (!CB->isArgOperand(&U))
1500           return true;
1501         unsigned ArgNo = CB->getArgOperandNo(&U);
1502 
1503         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1504             *this, IRPosition::callsite_argument(*CB, ArgNo));
1505         return NoFreeArg.isAssumedNoFree();
1506       }
1507 
1508       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1509           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1510         Follow = true;
1511         return true;
1512       }
1513       if (isa<ReturnInst>(UserI))
1514         return true;
1515 
1516       // Unknown user.
1517       return false;
1518     };
1519     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1520       return indicatePessimisticFixpoint();
1521 
1522     return ChangeStatus::UNCHANGED;
1523   }
1524 };
1525 
/// NoFree attribute for a function argument.
1527 struct AANoFreeArgument final : AANoFreeFloating {
1528   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1529       : AANoFreeFloating(IRP, A) {}
1530 
1531   /// See AbstractAttribute::trackStatistics()
1532   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1533 };
1534 
1535 /// NoFree attribute for call site arguments.
1536 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1537   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1538       : AANoFreeFloating(IRP, A) {}
1539 
1540   /// See AbstractAttribute::updateImpl(...).
1541   ChangeStatus updateImpl(Attributor &A) override {
1542     // TODO: Once we have call site specific value information we can provide
1543     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1545     //       redirecting requests to the callee argument.
1546     Argument *Arg = getAssociatedArgument();
1547     if (!Arg)
1548       return indicatePessimisticFixpoint();
1549     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1550     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1551     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1552   }
1553 
1554   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nofree)
  }
1556 };
1557 
1558 /// NoFree attribute for function return value.
1559 struct AANoFreeReturned final : AANoFreeFloating {
1560   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1561       : AANoFreeFloating(IRP, A) {
1562     llvm_unreachable("NoFree is not applicable to function returns!");
1563   }
1564 
1565   /// See AbstractAttribute::initialize(...).
1566   void initialize(Attributor &A) override {
1567     llvm_unreachable("NoFree is not applicable to function returns!");
1568   }
1569 
1570   /// See AbstractAttribute::updateImpl(...).
1571   ChangeStatus updateImpl(Attributor &A) override {
1572     llvm_unreachable("NoFree is not applicable to function returns!");
1573   }
1574 
1575   /// See AbstractAttribute::trackStatistics()
1576   void trackStatistics() const override {}
1577 };
1578 
1579 /// NoFree attribute deduction for a call site return value.
1580 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1581   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1582       : AANoFreeFloating(IRP, A) {}
1583 
1584   ChangeStatus manifest(Attributor &A) override {
1585     return ChangeStatus::UNCHANGED;
1586   }
1587   /// See AbstractAttribute::trackStatistics()
1588   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1589 };
1590 
1591 /// ------------------------ NonNull Argument Attribute ------------------------
1592 static int64_t getKnownNonNullAndDerefBytesForUse(
1593     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1594     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1595   TrackUse = false;
1596 
1597   const Value *UseV = U->get();
1598   if (!UseV->getType()->isPointerTy())
1599     return 0;
1600 
1601   Type *PtrTy = UseV->getType();
1602   const Function *F = I->getFunction();
1603   bool NullPointerIsDefined =
1604       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1605   const DataLayout &DL = A.getInfoCache().getDL();
1606   if (const auto *CB = dyn_cast<CallBase>(I)) {
1607     if (CB->isBundleOperand(U)) {
1608       if (RetainedKnowledge RK = getKnowledgeFromUse(
1609               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1610         IsNonNull |=
1611             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1612         return RK.ArgValue;
1613       }
1614       return 0;
1615     }
1616 
1617     if (CB->isCallee(U)) {
1618       IsNonNull |= !NullPointerIsDefined;
1619       return 0;
1620     }
1621 
1622     unsigned ArgNo = CB->getArgOperandNo(U);
1623     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1624     // As long as we only use known information there is no need to track
1625     // dependences here.
1626     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1627                                                   /* TrackDependence */ false);
1628     IsNonNull |= DerefAA.isKnownNonNull();
1629     return DerefAA.getKnownDereferenceableBytes();
1630   }
1631 
  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We could try to be smarter and avoid looking through constructs
  // we do not handle well for now, e.g., non-inbounds GEPs.
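  // Illustrative (assumed IR, not from a test): for
  //   %g = getelementptr inbounds i32, i32* %p, i64 1
  //   %v = load i32, i32* %g
  // following the use of %p through the GEP to the load lets us derive both
  // nonnull and a number of known dereferenceable bytes for %p.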
1635   if (isa<CastInst>(I)) {
1636     TrackUse = true;
1637     return 0;
1638   }
1639 
1640   if (isa<GetElementPtrInst>(I)) {
1641     TrackUse = true;
1642     return 0;
1643   }
1644 
1645   int64_t Offset;
1646   const Value *Base =
1647       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1648   if (Base) {
1649     if (Base == &AssociatedValue &&
1650         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1651       int64_t DerefBytes =
1652           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1653 
1654       IsNonNull |= !NullPointerIsDefined;
1655       return std::max(int64_t(0), DerefBytes);
1656     }
1657   }
1658 
  // Corner case when the offset is 0.
1660   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1661                                               /*AllowNonInbounds*/ true);
1662   if (Base) {
1663     if (Offset == 0 && Base == &AssociatedValue &&
1664         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1665       int64_t DerefBytes =
1666           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1667       IsNonNull |= !NullPointerIsDefined;
1668       return std::max(int64_t(0), DerefBytes);
1669     }
1670   }
1671 
1672   return 0;
1673 }
1674 
1675 struct AANonNullImpl : AANonNull {
1676   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1677       : AANonNull(IRP, A),
1678         NullIsDefined(NullPointerIsDefined(
1679             getAnchorScope(),
1680             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1681 
1682   /// See AbstractAttribute::initialize(...).
1683   void initialize(Attributor &A) override {
1684     Value &V = getAssociatedValue();
1685     if (!NullIsDefined &&
1686         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1687                 /* IgnoreSubsumingPositions */ false, &A)) {
1688       indicateOptimisticFixpoint();
1689       return;
1690     }
1691 
1692     if (isa<ConstantPointerNull>(V)) {
1693       indicatePessimisticFixpoint();
1694       return;
1695     }
1696 
1697     AANonNull::initialize(A);
1698 
1699     bool CanBeNull = true;
1700     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) {
1701       if (!CanBeNull) {
1702         indicateOptimisticFixpoint();
1703         return;
1704       }
1705     }
1706 
1707     if (isa<GlobalValue>(&getAssociatedValue())) {
1708       indicatePessimisticFixpoint();
1709       return;
1710     }
1711 
1712     if (Instruction *CtxI = getCtxI())
1713       followUsesInMBEC(*this, A, getState(), *CtxI);
1714   }
1715 
1716   /// See followUsesInMBEC
1717   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1718                        AANonNull::StateType &State) {
1719     bool IsNonNull = false;
1720     bool TrackUse = false;
1721     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1722                                        IsNonNull, TrackUse);
1723     State.setKnown(IsNonNull);
1724     return TrackUse;
1725   }
1726 
1727   /// See AbstractAttribute::getAsStr().
1728   const std::string getAsStr() const override {
1729     return getAssumed() ? "nonnull" : "may-null";
1730   }
1731 
1732   /// Flag to determine if the underlying value can be null and still allow
1733   /// valid accesses.
1734   const bool NullIsDefined;
1735 };
1736 
1737 /// NonNull attribute for a floating value.
1738 struct AANonNullFloating : public AANonNullImpl {
1739   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1740       : AANonNullImpl(IRP, A) {}
1741 
1742   /// See AbstractAttribute::updateImpl(...).
1743   ChangeStatus updateImpl(Attributor &A) override {
1744     const DataLayout &DL = A.getDataLayout();
1745 
1746     DominatorTree *DT = nullptr;
1747     AssumptionCache *AC = nullptr;
1748     InformationCache &InfoCache = A.getInfoCache();
1749     if (const Function *Fn = getAnchorScope()) {
1750       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1751       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1752     }
1753 
1754     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1755                             AANonNull::StateType &T, bool Stripped) -> bool {
1756       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1757       if (!Stripped && this == &AA) {
1758         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1759           T.indicatePessimisticFixpoint();
1760       } else {
1761         // Use abstract attribute information.
1762         const AANonNull::StateType &NS = AA.getState();
1763         T ^= NS;
1764       }
1765       return T.isValidState();
1766     };
1767 
1768     StateType T;
1769     if (!genericValueTraversal<AANonNull, StateType>(
1770             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1771       return indicatePessimisticFixpoint();
1772 
1773     return clampStateAndIndicateChange(getState(), T);
1774   }
1775 
1776   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
1778 };
1779 
1780 /// NonNull attribute for function return value.
1781 struct AANonNullReturned final
1782     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1783   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1784       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1785 
1786   /// See AbstractAttribute::getAsStr().
1787   const std::string getAsStr() const override {
1788     return getAssumed() ? "nonnull" : "may-null";
1789   }
1790 
1791   /// See AbstractAttribute::trackStatistics()
1792   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1793 };
1794 
1795 /// NonNull attribute for function argument.
1796 struct AANonNullArgument final
1797     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1798   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1799       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1800 
1801   /// See AbstractAttribute::trackStatistics()
1802   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1803 };
1804 
1805 struct AANonNullCallSiteArgument final : AANonNullFloating {
1806   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1807       : AANonNullFloating(IRP, A) {}
1808 
1809   /// See AbstractAttribute::trackStatistics()
1810   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1811 };
1812 
1813 /// NonNull attribute for a call site return position.
1814 struct AANonNullCallSiteReturned final
1815     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1816   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1817       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1818 
1819   /// See AbstractAttribute::trackStatistics()
1820   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1821 };
1822 
1823 /// ------------------------ No-Recurse Attributes ----------------------------
1824 
1825 struct AANoRecurseImpl : public AANoRecurse {
1826   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1827 
1828   /// See AbstractAttribute::getAsStr()
1829   const std::string getAsStr() const override {
1830     return getAssumed() ? "norecurse" : "may-recurse";
1831   }
1832 };
1833 
1834 struct AANoRecurseFunction final : AANoRecurseImpl {
1835   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1836       : AANoRecurseImpl(IRP, A) {}
1837 
1838   /// See AbstractAttribute::initialize(...).
1839   void initialize(Attributor &A) override {
1840     AANoRecurseImpl::initialize(A);
1841     if (const Function *F = getAnchorScope())
1842       if (A.getInfoCache().getSccSize(*F) != 1)
1843         indicatePessimisticFixpoint();
1844   }
1845 
1846   /// See AbstractAttribute::updateImpl(...).
1847   ChangeStatus updateImpl(Attributor &A) override {
1848 
1849     // If all live call sites are known to be no-recurse, we are as well.
1850     auto CallSitePred = [&](AbstractCallSite ACS) {
1851       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1852           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1853           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1854       return NoRecurseAA.isKnownNoRecurse();
1855     };
1856     bool AllCallSitesKnown;
1857     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1858       // If we know all call sites and all are known no-recurse, we are done.
1859       // If all known call sites, which might not be all that exist, are known
1860       // to be no-recurse, we are not done but we can continue to assume
1861       // no-recurse. If one of the call sites we have not visited will become
1862       // live, another update is triggered.
1863       if (AllCallSitesKnown)
1864         indicateOptimisticFixpoint();
1865       return ChangeStatus::UNCHANGED;
1866     }
1867 
1868     // If the above check does not hold anymore we look at the calls.
1869     auto CheckForNoRecurse = [&](Instruction &I) {
1870       const auto &CB = cast<CallBase>(I);
1871       if (CB.hasFnAttr(Attribute::NoRecurse))
1872         return true;
1873 
1874       const auto &NoRecurseAA =
1875           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1876       if (!NoRecurseAA.isAssumedNoRecurse())
1877         return false;
1878 
      // A call back into the function itself is (potential) recursion.
1880       if (CB.getCalledFunction() == getAnchorScope())
1881         return false;
1882 
1883       return true;
1884     };
1885 
1886     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1887       return indicatePessimisticFixpoint();
1888     return ChangeStatus::UNCHANGED;
1889   }
1890 
1891   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1892 };
1893 
/// NoRecurse attribute deduction for a call site.
1895 struct AANoRecurseCallSite final : AANoRecurseImpl {
1896   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1897       : AANoRecurseImpl(IRP, A) {}
1898 
1899   /// See AbstractAttribute::initialize(...).
1900   void initialize(Attributor &A) override {
1901     AANoRecurseImpl::initialize(A);
1902     Function *F = getAssociatedFunction();
1903     if (!F || F->isDeclaration())
1904       indicatePessimisticFixpoint();
1905   }
1906 
1907   /// See AbstractAttribute::updateImpl(...).
1908   ChangeStatus updateImpl(Attributor &A) override {
1909     // TODO: Once we have call site specific value information we can provide
1910     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
1912     //       redirecting requests to the callee argument.
1913     Function *F = getAssociatedFunction();
1914     const IRPosition &FnPos = IRPosition::function(*F);
1915     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1916     return clampStateAndIndicateChange(getState(), FnAA.getState());
1917   }
1918 
1919   /// See AbstractAttribute::trackStatistics()
1920   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1921 };
1922 
1923 /// -------------------- Undefined-Behavior Attributes ------------------------
1924 
1925 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1926   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1927       : AAUndefinedBehavior(IRP, A) {}
1928 
1929   /// See AbstractAttribute::updateImpl(...).
1931   ChangeStatus updateImpl(Attributor &A) override {
1932     const size_t UBPrevSize = KnownUBInsts.size();
1933     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1934 
1935     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1936       // Skip instructions that are already saved.
1937       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1938         return true;
1939 
      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // so getPointerOperand() should return that operand.
1943       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1944       assert(PtrOp &&
1945              "Expected pointer operand of memory accessing instruction");
1946 
1947       // Either we stopped and the appropriate action was taken,
1948       // or we got back a simplified value to continue.
1949       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1950       if (!SimplifiedPtrOp.hasValue())
1951         return true;
1952       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1953 
      // A memory access through a pointer is considered UB
      // only if the pointer has a constant null value.
      // TODO: Expand this to not only check constant values.
1957       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1958         AssumedNoUBInsts.insert(&I);
1959         return true;
1960       }
1961       const Type *PtrTy = PtrOpVal->getType();
1962 
1963       // Because we only consider instructions inside functions,
1964       // assume that a parent function exists.
1965       const Function *F = I.getFunction();
1966 
      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
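      // Illustrative (assumed IR, not from a test): in address space 0 on a
      // typical target,
      //   store i32 0, i32* null
      // is known UB and can later be replaced by unreachable, unless null is
      // a valid pointer in this function (see llvm::NullPointerIsDefined).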
1969       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1970         AssumedNoUBInsts.insert(&I);
1971       else
1972         KnownUBInsts.insert(&I);
1973       return true;
1974     };
1975 
1976     auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has an `undef`
      // condition.
1979 
1980       // Skip instructions that are already saved.
1981       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1982         return true;
1983 
1984       // We know we have a branch instruction.
1985       auto BrInst = cast<BranchInst>(&I);
1986 
1987       // Unconditional branches are never considered UB.
1988       if (BrInst->isUnconditional())
1989         return true;
1990 
1991       // Either we stopped and the appropriate action was taken,
1992       // or we got back a simplified value to continue.
1993       Optional<Value *> SimplifiedCond =
1994           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1995       if (!SimplifiedCond.hasValue())
1996         return true;
1997       AssumedNoUBInsts.insert(&I);
1998       return true;
1999     };
2000 
2001     auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a callsite always causes UB or not.
2003 
2004       // Skip instructions that are already saved.
2005       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2006         return true;
2007 
      // Check nonnull and noundef argument attribute violations for each
      // callsite.
2010       CallBase &CB = cast<CallBase>(I);
2011       Function *Callee = CB.getCalledFunction();
2012       if (!Callee)
2013         return true;
2014       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute,
        // this callsite is considered UB.
2020         if (idx >= Callee->arg_size())
2021           break;
2022         Value *ArgVal = CB.getArgOperand(idx);
2023         if (!ArgVal)
2024           continue;
        // Here, we handle three cases.
        //   (1) Not having a value means it is dead. (We can replace the value
        //       with undef.)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to a null pointer where the position is known to be
        //       nonnull. The argument is a poison value and violates the
        //       noundef attribute.
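        // Illustrative (assumed IR, not from a test): given
        //   declare void @f(i32* nonnull noundef)
        // a call like `call void @f(i32* null)` passes a poison value to a
        // noundef parameter and is therefore known UB.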
2031         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2032         auto &NoUndefAA = A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP,
2033                                                 /* TrackDependence */ false);
2034         if (!NoUndefAA.isKnownNoUndef())
2035           continue;
2036         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2037             *this, IRPosition::value(*ArgVal), /* TrackDependence */ false);
2038         if (!ValueSimplifyAA.isKnown())
2039           continue;
2040         Optional<Value *> SimplifiedVal =
2041             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2042         if (!SimplifiedVal.hasValue() ||
2043             isa<UndefValue>(*SimplifiedVal.getValue())) {
2044           KnownUBInsts.insert(&I);
2045           continue;
2046         }
2047         if (!ArgVal->getType()->isPointerTy() ||
2048             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2049           continue;
2050         auto &NonNullAA = A.getAAFor<AANonNull>(*this, CalleeArgumentIRP,
2051                                                 /* TrackDependence */ false);
2052         if (NonNullAA.isKnownNonNull())
2053           KnownUBInsts.insert(&I);
2054       }
2055       return true;
2056     };
2057 
2058     auto InspectReturnInstForUB =
2059         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
          // Check if a return instruction always causes UB or not.
          // Note: It is guaranteed that the returned position of the anchor
          //       scope has the noundef attribute when this is called.
          //       We also ensure the return position is not "assumed dead"
          //       because the returned value was then potentially simplified
          //       to `undef` in AAReturnedValues without removing the
          //       `noundef` attribute yet.
2067 
          // When the returned position has the noundef attribute, UB occurs in
          // the following cases.
          //   (1) The returned value is known to be undef.
          //   (2) The value is known to be a null pointer and the returned
          //       position has the nonnull attribute (because the returned
          //       value is then poison).
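          // Illustrative (assumed IR, not from a test): in a function declared
          //   define noundef nonnull i8* @g()
          // an instruction `ret i8* null` returns poison through a noundef
          // position and is therefore known UB.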
2074           bool FoundUB = false;
2075           if (isa<UndefValue>(V)) {
2076             FoundUB = true;
2077           } else {
2078             if (isa<ConstantPointerNull>(V)) {
2079               auto &NonNullAA = A.getAAFor<AANonNull>(
2080                   *this, IRPosition::returned(*getAnchorScope()),
2081                   /* TrackDependence */ false);
2082               if (NonNullAA.isKnownNonNull())
2083                 FoundUB = true;
2084             }
2085           }
2086 
2087           if (FoundUB)
2088             for (ReturnInst *RI : RetInsts)
2089               KnownUBInsts.insert(RI);
2090           return true;
2091         };
2092 
2093     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2094                               {Instruction::Load, Instruction::Store,
2095                                Instruction::AtomicCmpXchg,
2096                                Instruction::AtomicRMW},
2097                               /* CheckBBLivenessOnly */ true);
2098     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2099                               /* CheckBBLivenessOnly */ true);
2100     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2101 
    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
2104     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2105       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2106       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2107         auto &RetPosNoUndefAA =
2108             A.getAAFor<AANoUndef>(*this, ReturnIRP,
2109                                   /* TrackDependence */ false);
2110         if (RetPosNoUndefAA.isKnownNoUndef())
2111           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2112                                                     *this);
2113       }
2114     }
2115 
2116     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2117         UBPrevSize != KnownUBInsts.size())
2118       return ChangeStatus::CHANGED;
2119     return ChangeStatus::UNCHANGED;
2120   }
2121 
2122   bool isKnownToCauseUB(Instruction *I) const override {
2123     return KnownUBInsts.count(I);
2124   }
2125 
2126   bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that includes
    // those in the KnownUBInsts set). The rest of the boilerplate is to ensure
    // that it is one of the instructions we test for UB.
2132 
2133     switch (I->getOpcode()) {
2134     case Instruction::Load:
2135     case Instruction::Store:
2136     case Instruction::AtomicCmpXchg:
2137     case Instruction::AtomicRMW:
2138       return !AssumedNoUBInsts.count(I);
2139     case Instruction::Br: {
2140       auto BrInst = cast<BranchInst>(I);
2141       if (BrInst->isUnconditional())
2142         return false;
2143       return !AssumedNoUBInsts.count(I);
2144     } break;
2145     default:
2146       return false;
2147     }
2148     return false;
2149   }
2150 
2151   ChangeStatus manifest(Attributor &A) override {
2152     if (KnownUBInsts.empty())
2153       return ChangeStatus::UNCHANGED;
2154     for (Instruction *I : KnownUBInsts)
2155       A.changeToUnreachableAfterManifest(I);
2156     return ChangeStatus::CHANGED;
2157   }
2158 
2159   /// See AbstractAttribute::getAsStr()
2160   const std::string getAsStr() const override {
2161     return getAssumed() ? "undefined-behavior" : "no-ub";
2162   }
2163 
2164   /// Note: The correctness of this analysis depends on the fact that the
2165   /// following 2 sets will stop changing after some point.
2166   /// "Change" here means that their size changes.
2167   /// The size of each set is monotonically increasing
2168   /// (we only add items to them) and it is upper bounded by the number of
2169   /// instructions in the processed function (we can never save more
2170   /// elements in either set than this number). Hence, at some point,
2171   /// they will stop increasing.
2172   /// Consequently, at some point, both sets will have stopped
2173   /// changing, effectively making the analysis reach a fixpoint.
2174 
2175   /// Note: These 2 sets are disjoint and an instruction can be considered
2176   /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it), in which case
  ///    it is put in the KnownUBInsts set.
2179   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2180   ///    has a reason to assume it).
  /// 3) Assumed to not cause UB: every other instruction. AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
2184   ///    so that we don't reprocess them in every update.
2185   ///    Note however that instructions in this set may cause UB.
2186 
2187 protected:
2188   /// A set of all live instructions _known_ to cause UB.
2189   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2190 
2191 private:
2192   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2193   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2194 
  // Should be called during updates when we are processing an instruction
  // \p I that depends on a value \p V; one of the following has to happen:
2197   // - If the value is assumed, then stop.
2198   // - If the value is known but undef, then consider it UB.
2199   // - Otherwise, do specific processing with the simplified value.
2200   // We return None in the first 2 cases to signify that an appropriate
2201   // action was taken and the caller should stop.
2202   // Otherwise, we return the simplified value that the caller should
2203   // use for specific processing.
2204   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2205                                          Instruction *I) {
2206     const auto &ValueSimplifyAA =
2207         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2208     Optional<Value *> SimplifiedV =
2209         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2210     if (!ValueSimplifyAA.isKnown()) {
2211       // Don't depend on assumed values.
2212       return llvm::None;
2213     }
2214     if (!SimplifiedV.hasValue()) {
2215       // If it is known (which we tested above) but it doesn't have a value,
2216       // then we can assume `undef` and hence the instruction is UB.
2217       KnownUBInsts.insert(I);
2218       return llvm::None;
2219     }
2220     Value *Val = SimplifiedV.getValue();
2221     if (isa<UndefValue>(Val)) {
2222       KnownUBInsts.insert(I);
2223       return llvm::None;
2224     }
2225     return Val;
2226   }
2227 };
2228 
2229 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2230   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2231       : AAUndefinedBehaviorImpl(IRP, A) {}
2232 
2233   /// See AbstractAttribute::trackStatistics()
2234   void trackStatistics() const override {
2235     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2236                "Number of instructions known to have UB");
2237     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2238         KnownUBInsts.size();
2239   }
2240 };
2241 
2242 /// ------------------------ Will-Return Attributes ----------------------------
2243 
// Helper function that checks whether a function has any cycle which we do not
// know to be bounded.
// Loops with a known maximum trip count are considered bounded; any other
// cycle is not.
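// Illustrative (assumed example, not from a test): a loop like
//   for (int i = 0; i < 8; ++i) { ... }
// has a constant maximum trip count from ScalarEvolution and is considered
// bounded, whereas `for (;;) { ... }` or any irreducible cycle is treated as
// potentially unbounded.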
2247 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2248   ScalarEvolution *SE =
2249       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2250   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function then we
  // assume any cycle to be an unbounded cycle.
  // We use scc_iterator, which uses Tarjan's algorithm, to find all the
  // maximal SCCs. To detect if there's a cycle, we only need to find the
  // maximal ones.
2255   if (!SE || !LI) {
2256     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2257       if (SCCI.hasCycle())
2258         return true;
2259     return false;
2260   }
2261 
2262   // If there's irreducible control, the function may contain non-loop cycles.
2263   if (mayContainIrreducibleControl(F, LI))
2264     return true;
2265 
  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
2267   for (auto *L : LI->getLoopsInPreorder()) {
2268     if (!SE->getSmallConstantMaxTripCount(L))
2269       return true;
2270   }
2271   return false;
2272 }
2273 
2274 struct AAWillReturnImpl : public AAWillReturn {
2275   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2276       : AAWillReturn(IRP, A) {}
2277 
2278   /// See AbstractAttribute::initialize(...).
2279   void initialize(Attributor &A) override {
2280     AAWillReturn::initialize(A);
2281 
2282     Function *F = getAnchorScope();
2283     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2284       indicatePessimisticFixpoint();
2285   }
2286 
2287   /// See AbstractAttribute::updateImpl(...).
2288   ChangeStatus updateImpl(Attributor &A) override {
2289     auto CheckForWillReturn = [&](Instruction &I) {
2290       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2291       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2292       if (WillReturnAA.isKnownWillReturn())
2293         return true;
2294       if (!WillReturnAA.isAssumedWillReturn())
2295         return false;
2296       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2297       return NoRecurseAA.isAssumedNoRecurse();
2298     };
2299 
2300     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2301       return indicatePessimisticFixpoint();
2302 
2303     return ChangeStatus::UNCHANGED;
2304   }
2305 
2306   /// See AbstractAttribute::getAsStr()
2307   const std::string getAsStr() const override {
2308     return getAssumed() ? "willreturn" : "may-noreturn";
2309   }
2310 };
2311 
2312 struct AAWillReturnFunction final : AAWillReturnImpl {
2313   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2314       : AAWillReturnImpl(IRP, A) {}
2315 
2316   /// See AbstractAttribute::trackStatistics()
2317   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2318 };
2319 
/// WillReturn attribute deduction for a call site.
2321 struct AAWillReturnCallSite final : AAWillReturnImpl {
2322   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2323       : AAWillReturnImpl(IRP, A) {}
2324 
2325   /// See AbstractAttribute::initialize(...).
2326   void initialize(Attributor &A) override {
2327     AAWillReturn::initialize(A);
2328     Function *F = getAssociatedFunction();
2329     if (!F || !A.isFunctionIPOAmendable(*F))
2330       indicatePessimisticFixpoint();
2331   }
2332 
2333   /// See AbstractAttribute::updateImpl(...).
2334   ChangeStatus updateImpl(Attributor &A) override {
2335     // TODO: Once we have call site specific value information we can provide
2336     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
2338     //       redirecting requests to the callee argument.
2339     Function *F = getAssociatedFunction();
2340     const IRPosition &FnPos = IRPosition::function(*F);
2341     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2342     return clampStateAndIndicateChange(getState(), FnAA.getState());
2343   }
2344 
2345   /// See AbstractAttribute::trackStatistics()
2346   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2347 };
2348 
2349 /// -------------------AAReachability Attribute--------------------------
2350 
2351 struct AAReachabilityImpl : AAReachability {
2352   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2353       : AAReachability(IRP, A) {}
2354 
2355   const std::string getAsStr() const override {
2356     // TODO: Return the number of reachable queries.
2357     return "reachable";
2358   }
2359 
2360   /// See AbstractAttribute::initialize(...).
2361   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2362 
2363   /// See AbstractAttribute::updateImpl(...).
2364   ChangeStatus updateImpl(Attributor &A) override {
2365     return indicatePessimisticFixpoint();
2366   }
2367 };
2368 
2369 struct AAReachabilityFunction final : public AAReachabilityImpl {
2370   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2371       : AAReachabilityImpl(IRP, A) {}
2372 
2373   /// See AbstractAttribute::trackStatistics()
2374   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2375 };
2376 
2377 /// ------------------------ NoAlias Argument Attribute ------------------------
2378 
2379 struct AANoAliasImpl : AANoAlias {
2380   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2381     assert(getAssociatedType()->isPointerTy() &&
2382            "Noalias is a pointer attribute");
2383   }
2384 
2385   const std::string getAsStr() const override {
2386     return getAssumed() ? "noalias" : "may-alias";
2387   }
2388 };
2389 
2390 /// NoAlias attribute for a floating value.
2391 struct AANoAliasFloating final : AANoAliasImpl {
2392   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2393       : AANoAliasImpl(IRP, A) {}
2394 
2395   /// See AbstractAttribute::initialize(...).
2396   void initialize(Attributor &A) override {
2397     AANoAliasImpl::initialize(A);
2398     Value *Val = &getAssociatedValue();
2399     do {
2400       CastInst *CI = dyn_cast<CastInst>(Val);
2401       if (!CI)
2402         break;
2403       Value *Base = CI->getOperand(0);
2404       if (!Base->hasOneUse())
2405         break;
2406       Val = Base;
2407     } while (true);
2408 
2409     if (!Val->getType()->isPointerTy()) {
2410       indicatePessimisticFixpoint();
2411       return;
2412     }
2413 
2414     if (isa<AllocaInst>(Val))
2415       indicateOptimisticFixpoint();
2416     else if (isa<ConstantPointerNull>(Val) &&
2417              !NullPointerIsDefined(getAnchorScope(),
2418                                    Val->getType()->getPointerAddressSpace()))
2419       indicateOptimisticFixpoint();
2420     else if (Val != &getAssociatedValue()) {
2421       const auto &ValNoAliasAA =
2422           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2423       if (ValNoAliasAA.isKnownNoAlias())
2424         indicateOptimisticFixpoint();
2425     }
2426   }
2427 
2428   /// See AbstractAttribute::updateImpl(...).
2429   ChangeStatus updateImpl(Attributor &A) override {
2430     // TODO: Implement this.
2431     return indicatePessimisticFixpoint();
2432   }
2433 
2434   /// See AbstractAttribute::trackStatistics()
2435   void trackStatistics() const override {
2436     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2437   }
2438 };
2439 
2440 /// NoAlias attribute for an argument.
2441 struct AANoAliasArgument final
2442     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2443   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2444   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2445 
2446   /// See AbstractAttribute::initialize(...).
2447   void initialize(Attributor &A) override {
2448     Base::initialize(A);
2449     // See callsite argument attribute and callee argument attribute.
2450     if (hasAttr({Attribute::ByVal}))
2451       indicateOptimisticFixpoint();
2452   }
2453 
2454   /// See AbstractAttribute::update(...).
2455   ChangeStatus updateImpl(Attributor &A) override {
2456     // We have to make sure no-alias on the argument does not break
2457     // synchronization when this is a callback argument, see also [1] below.
2458     // If synchronization cannot be affected, we delegate to the base updateImpl
2459     // function, otherwise we give up for now.
2460 
2461     // If the function is no-sync, no-alias cannot break synchronization.
2462     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2463         *this, IRPosition::function_scope(getIRPosition()));
2464     if (NoSyncAA.isAssumedNoSync())
2465       return Base::updateImpl(A);
2466 
2467     // If the argument is read-only, no-alias cannot break synchronization.
2468     const auto &MemBehaviorAA =
2469         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2470     if (MemBehaviorAA.isAssumedReadOnly())
2471       return Base::updateImpl(A);
2472 
2473     // If the argument is never passed through callbacks, no-alias cannot break
2474     // synchronization.
2475     bool AllCallSitesKnown;
2476     if (A.checkForAllCallSites(
2477             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2478             true, AllCallSitesKnown))
2479       return Base::updateImpl(A);
2480 
2481     // TODO: add no-alias but make sure it doesn't break synchronization by
2482     // introducing fake uses. See:
2483     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2484     //     International Workshop on OpenMP 2018,
2485     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2486 
2487     return indicatePessimisticFixpoint();
2488   }
2489 
2490   /// See AbstractAttribute::trackStatistics()
2491   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2492 };
2493 
2494 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2495   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2496       : AANoAliasImpl(IRP, A) {}
2497 
2498   /// See AbstractAttribute::initialize(...).
2499   void initialize(Attributor &A) override {
2500     // See callsite argument attribute and callee argument attribute.
2501     const auto &CB = cast<CallBase>(getAnchorValue());
2502     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2503       indicateOptimisticFixpoint();
2504     Value &Val = getAssociatedValue();
2505     if (isa<ConstantPointerNull>(Val) &&
2506         !NullPointerIsDefined(getAnchorScope(),
2507                               Val.getType()->getPointerAddressSpace()))
2508       indicateOptimisticFixpoint();
2509   }
2510 
  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
2513   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2514                             const AAMemoryBehavior &MemBehaviorAA,
2515                             const CallBase &CB, unsigned OtherArgNo) {
2516     // We do not need to worry about aliasing with the underlying IRP.
2517     if (this->getCalleeArgNo() == (int)OtherArgNo)
2518       return false;
2519 
2520     // If it is not a pointer or pointer vector we do not alias.
2521     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2522     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2523       return false;
2524 
2525     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2526         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2527         /* TrackDependence */ false);
2528 
2529     // If the argument is readnone, there is no read-write aliasing.
2530     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2531       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2532       return false;
2533     }
2534 
2535     // If the argument is readonly and the underlying value is readonly, there
2536     // is no read-write aliasing.
2537     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2538     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2539       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2540       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2541       return false;
2542     }
2543 
2544     // We have to utilize actual alias analysis queries so we need the object.
2545     if (!AAR)
2546       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2547 
2548     // Try to rule it out at the call site.
2549     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2550     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2551                          "callsite arguments: "
2552                       << getAssociatedValue() << " " << *ArgOp << " => "
2553                       << (IsAliasing ? "" : "no-") << "alias \n");
2554 
2555     return IsAliasing;
2556   }
2557 
2558   bool
2559   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2560                                          const AAMemoryBehavior &MemBehaviorAA,
2561                                          const AANoAlias &NoAliasAA) {
2562     // We can deduce "noalias" if the following conditions hold.
2563     // (i)   Associated value is assumed to be noalias in the definition.
2564     // (ii)  Associated value is assumed to be no-capture in all the uses
2565     //       possibly executed before this callsite.
2566     // (iii) There is no other pointer argument which could alias with the
2567     //       value.
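    // Illustrative (assumed IR, not from a test): for
    //   %p = call noalias i8* @malloc(i64 8)
    //   call void @use(i8* %p, i8* %q)
    // the first argument of @use can be marked noalias if %p is noalias at
    // its definition, is not captured before the call, and cannot alias the
    // other pointer argument %q.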
2568 
2569     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2570     if (!AssociatedValueIsNoAliasAtDef) {
2571       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2572                         << " is not no-alias at the definition\n");
2573       return false;
2574     }
2575 
2576     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2577 
2578     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2579     const Function *ScopeFn = VIRP.getAnchorScope();
2580     auto &NoCaptureAA =
2581         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
    // Check whether the value is captured in the scope using AANoCapture.
    // Look at the CFG and check only uses possibly executed before this
    // callsite.
2585     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2586       Instruction *UserI = cast<Instruction>(U.getUser());
2587 
      // If UserI is the current instruction and there is a single potential
      // use of the value in UserI we allow the use.
2590       // TODO: We should inspect the operands and allow those that cannot alias
2591       //       with the value.
2592       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2593         return true;
2594 
2595       if (ScopeFn) {
2596         const auto &ReachabilityAA =
2597             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2598 
2599         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2600           return true;
2601 
2602         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2603           if (CB->isArgOperand(&U)) {
2604 
2605             unsigned ArgNo = CB->getArgOperandNo(&U);
2606 
2607             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2608                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2609 
2610             if (NoCaptureAA.isAssumedNoCapture())
2611               return true;
2612           }
2613         }
2614       }
2615 
      // For cases which can potentially have more users, follow the uses.
2617       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2618           isa<SelectInst>(U)) {
2619         Follow = true;
2620         return true;
2621       }
2622 
2623       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2624       return false;
2625     };
2626 
2627     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2628       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2629         LLVM_DEBUG(
2630             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2631                    << " cannot be noalias as it is potentially captured\n");
2632         return false;
2633       }
2634     }
2635     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2636 
2637     // Check there is no other pointer argument which could alias with the
2638     // value passed at this call site.
2639     // TODO: AbstractCallSite
2640     const auto &CB = cast<CallBase>(getAnchorValue());
2641     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2642          OtherArgNo++)
2643       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2644         return false;
2645 
2646     return true;
2647   }
2648 
2649   /// See AbstractAttribute::updateImpl(...).
2650   ChangeStatus updateImpl(Attributor &A) override {
2651     // If the argument is readnone we are done as there are no accesses via the
2652     // argument.
2653     auto &MemBehaviorAA =
2654         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2655                                      /* TrackDependence */ false);
2656     if (MemBehaviorAA.isAssumedReadNone()) {
2657       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2658       return ChangeStatus::UNCHANGED;
2659     }
2660 
2661     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2662     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2663                                                   /* TrackDependence */ false);
2664 
2665     AAResults *AAR = nullptr;
2666     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2667                                                NoAliasAA)) {
2668       LLVM_DEBUG(
2669           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2670       return ChangeStatus::UNCHANGED;
2671     }
2672 
2673     return indicatePessimisticFixpoint();
2674   }
2675 
2676   /// See AbstractAttribute::trackStatistics()
2677   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2678 };
2679 
2680 /// NoAlias attribute for function return value.
2681 struct AANoAliasReturned final : AANoAliasImpl {
2682   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2683       : AANoAliasImpl(IRP, A) {}
2684 
2685   /// See AbstractAttribute::initialize(...).
2686   void initialize(Attributor &A) override {
2687     AANoAliasImpl::initialize(A);
2688     Function *F = getAssociatedFunction();
2689     if (!F || F->isDeclaration())
2690       indicatePessimisticFixpoint();
2691   }
2692 
2693   /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
2695 
2696     auto CheckReturnValue = [&](Value &RV) -> bool {
2697       if (Constant *C = dyn_cast<Constant>(&RV))
2698         if (C->isNullValue() || isa<UndefValue>(C))
2699           return true;
2700 
2701       /// For now, we can only deduce noalias if we have call sites.
2702       /// FIXME: add more support.
2703       if (!isa<CallBase>(&RV))
2704         return false;
2705 
2706       const IRPosition &RVPos = IRPosition::value(RV);
2707       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2708       if (!NoAliasAA.isAssumedNoAlias())
2709         return false;
2710 
2711       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2712       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2713     };
2714 
2715     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2716       return indicatePessimisticFixpoint();
2717 
2718     return ChangeStatus::UNCHANGED;
2719   }
2720 
2721   /// See AbstractAttribute::trackStatistics()
2722   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2723 };
2724 
2725 /// NoAlias attribute deduction for a call site return value.
2726 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2727   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2728       : AANoAliasImpl(IRP, A) {}
2729 
2730   /// See AbstractAttribute::initialize(...).
2731   void initialize(Attributor &A) override {
2732     AANoAliasImpl::initialize(A);
2733     Function *F = getAssociatedFunction();
2734     if (!F || F->isDeclaration())
2735       indicatePessimisticFixpoint();
2736   }
2737 
2738   /// See AbstractAttribute::updateImpl(...).
2739   ChangeStatus updateImpl(Attributor &A) override {
2740     // TODO: Once we have call site specific value information we can provide
2741     //       call site specific liveness information and then it makes
2742     //       sense to specialize attributes for call site arguments instead of
2743     //       redirecting requests to the callee argument.
2744     Function *F = getAssociatedFunction();
2745     const IRPosition &FnPos = IRPosition::returned(*F);
2746     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2747     return clampStateAndIndicateChange(getState(), FnAA.getState());
2748   }
2749 
2750   /// See AbstractAttribute::trackStatistics()
2751   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2752 };
2753 
2754 /// -------------------AAIsDead Function Attribute-----------------------
2755 
2756 struct AAIsDeadValueImpl : public AAIsDead {
2757   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2758 
2759   /// See AAIsDead::isAssumedDead().
2760   bool isAssumedDead() const override { return getAssumed(); }
2761 
2762   /// See AAIsDead::isKnownDead().
2763   bool isKnownDead() const override { return getKnown(); }
2764 
2765   /// See AAIsDead::isAssumedDead(BasicBlock *).
2766   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2767 
2768   /// See AAIsDead::isKnownDead(BasicBlock *).
2769   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2770 
2771   /// See AAIsDead::isAssumedDead(Instruction *I).
2772   bool isAssumedDead(const Instruction *I) const override {
2773     return I == getCtxI() && isAssumedDead();
2774   }
2775 
2776   /// See AAIsDead::isKnownDead(Instruction *I).
2777   bool isKnownDead(const Instruction *I) const override {
2778     return isAssumedDead(I) && getKnown();
2779   }
2780 
2781   /// See AbstractAttribute::getAsStr().
2782   const std::string getAsStr() const override {
2783     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2784   }
2785 
2786   /// Check if all uses are assumed dead.
2787   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2788     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
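    // The predicate rejects every use, so the check below succeeds only if no
    // live use of V is found, i.e., all uses are already assumed dead.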
2789     // Explicitly set the dependence class to required because we want a long
2790     // chain of N dependent instructions to be considered live as soon as one is
2791     // without going through N update cycles. This is not required for
2792     // correctness.
2793     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2794   }
2795 
2796   /// Determine if \p I is assumed to be side-effect free.
2797   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2798     if (!I || wouldInstructionBeTriviallyDead(I))
2799       return true;
2800 
2801     auto *CB = dyn_cast<CallBase>(I);
2802     if (!CB || isa<IntrinsicInst>(CB))
2803       return false;
2804 
2805     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2806     const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>(
2807         *this, CallIRP, /* TrackDependence */ false);
2808     if (!NoUnwindAA.isAssumedNoUnwind())
2809       return false;
2810     if (!NoUnwindAA.isKnownNoUnwind())
2811       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2812 
2813     const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>(
2814         *this, CallIRP, /* TrackDependence */ false);
2815     if (MemBehaviorAA.isAssumedReadOnly()) {
2816       if (!MemBehaviorAA.isKnownReadOnly())
2817         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2818       return true;
2819     }
2820     return false;
2821   }
2822 };
2823 
2824 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2825   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2826       : AAIsDeadValueImpl(IRP, A) {}
2827 
2828   /// See AbstractAttribute::initialize(...).
2829   void initialize(Attributor &A) override {
2830     if (isa<UndefValue>(getAssociatedValue())) {
2831       indicatePessimisticFixpoint();
2832       return;
2833     }
2834 
2835     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2836     if (!isAssumedSideEffectFree(A, I))
2837       indicatePessimisticFixpoint();
2838   }
2839 
2840   /// See AbstractAttribute::updateImpl(...).
2841   ChangeStatus updateImpl(Attributor &A) override {
2842     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2843     if (!isAssumedSideEffectFree(A, I))
2844       return indicatePessimisticFixpoint();
2845 
2846     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2847       return indicatePessimisticFixpoint();
2848     return ChangeStatus::UNCHANGED;
2849   }
2850 
2851   /// See AbstractAttribute::manifest(...).
2852   ChangeStatus manifest(Attributor &A) override {
2853     Value &V = getAssociatedValue();
2854     if (auto *I = dyn_cast<Instruction>(&V)) {
2855       // If we get here we basically know the users are all dead. We check
2856       // isAssumedSideEffectFree again because it might no longer hold, in
2857       // which case only the users are dead but the instruction (=call) is
2858       // still needed.
2859       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2860         A.deleteAfterManifest(*I);
2861         return ChangeStatus::CHANGED;
2862       }
2863     }
2864     if (V.use_empty())
2865       return ChangeStatus::UNCHANGED;
2866 
2867     bool UsedAssumedInformation = false;
2868     Optional<Constant *> C =
2869         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2870     if (C.hasValue() && C.getValue())
2871       return ChangeStatus::UNCHANGED;
2872 
2873     // Replace the value with undef as it is dead but keep droppable uses around
2874     // as they provide information we don't want to give up on just yet.
2875     UndefValue &UV = *UndefValue::get(V.getType());
2876     bool AnyChange =
2877         A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
2878     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2879   }
2880 
2881   /// See AbstractAttribute::trackStatistics()
2882   void trackStatistics() const override {
2883     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2884   }
2885 };
2886 
2887 struct AAIsDeadArgument : public AAIsDeadFloating {
2888   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2889       : AAIsDeadFloating(IRP, A) {}
2890 
2891   /// See AbstractAttribute::initialize(...).
2892   void initialize(Attributor &A) override {
2893     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2894       indicatePessimisticFixpoint();
2895   }
2896 
2897   /// See AbstractAttribute::manifest(...).
2898   ChangeStatus manifest(Attributor &A) override {
2899     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2900     Argument &Arg = *getAssociatedArgument();
2901     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2902       if (A.registerFunctionSignatureRewrite(
2903               Arg, /* ReplacementTypes */ {},
2904               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2905               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2906         Arg.dropDroppableUses();
2907         return ChangeStatus::CHANGED;
2908       }
2909     return Changed;
2910   }
2911 
2912   /// See AbstractAttribute::trackStatistics()
2913   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2914 };
2915 
2916 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2917   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2918       : AAIsDeadValueImpl(IRP, A) {}
2919 
2920   /// See AbstractAttribute::initialize(...).
2921   void initialize(Attributor &A) override {
2922     if (isa<UndefValue>(getAssociatedValue()))
2923       indicatePessimisticFixpoint();
2924   }
2925 
2926   /// See AbstractAttribute::updateImpl(...).
2927   ChangeStatus updateImpl(Attributor &A) override {
2928     // TODO: Once we have call site specific value information we can provide
2929     //       call site specific liveness information and then it makes
2930     //       sense to specialize attributes for call site arguments instead of
2931     //       redirecting requests to the callee argument.
2932     Argument *Arg = getAssociatedArgument();
2933     if (!Arg)
2934       return indicatePessimisticFixpoint();
2935     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2936     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2937     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2938   }
2939 
2940   /// See AbstractAttribute::manifest(...).
2941   ChangeStatus manifest(Attributor &A) override {
2942     CallBase &CB = cast<CallBase>(getAnchorValue());
2943     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2944     assert(!isa<UndefValue>(U.get()) &&
2945            "Expected undef values to be filtered out!");
2946     UndefValue &UV = *UndefValue::get(U->getType());
2947     if (A.changeUseAfterManifest(U, UV))
2948       return ChangeStatus::CHANGED;
2949     return ChangeStatus::UNCHANGED;
2950   }
2951 
2952   /// See AbstractAttribute::trackStatistics()
2953   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2954 };
2955 
2956 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2957   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2958       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2959 
2960   /// See AAIsDead::isAssumedDead().
2961   bool isAssumedDead() const override {
2962     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2963   }
2964 
2965   /// See AbstractAttribute::initialize(...).
2966   void initialize(Attributor &A) override {
2967     if (isa<UndefValue>(getAssociatedValue())) {
2968       indicatePessimisticFixpoint();
2969       return;
2970     }
2971 
2972     // We track this separately as a secondary state.
2973     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2974   }
2975 
2976   /// See AbstractAttribute::updateImpl(...).
2977   ChangeStatus updateImpl(Attributor &A) override {
2978     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2979     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2980       IsAssumedSideEffectFree = false;
2981       Changed = ChangeStatus::CHANGED;
2982     }
2983 
2984     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2985       return indicatePessimisticFixpoint();
2986     return Changed;
2987   }
2988 
2989   /// See AbstractAttribute::trackStatistics()
2990   void trackStatistics() const override {
2991     if (IsAssumedSideEffectFree)
2992       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2993     else
2994       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2995   }
2996 
2997   /// See AbstractAttribute::getAsStr().
2998   const std::string getAsStr() const override {
2999     return isAssumedDead()
3000                ? "assumed-dead"
3001                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3002   }
3003 
3004 private:
3005   bool IsAssumedSideEffectFree;
3006 };
3007 
3008 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3009   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3010       : AAIsDeadValueImpl(IRP, A) {}
3011 
3012   /// See AbstractAttribute::updateImpl(...).
3013   ChangeStatus updateImpl(Attributor &A) override {
3014 
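    // Note: the predicate below trivially succeeds; presumably this visit only
    // serves to record liveness dependences on the return instructions so we
    // are updated once they become dead.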
3015     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3016                               {Instruction::Ret});
3017 
3018     auto PredForCallSite = [&](AbstractCallSite ACS) {
3019       if (ACS.isCallbackCall() || !ACS.getInstruction())
3020         return false;
3021       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3022     };
3023 
3024     bool AllCallSitesKnown;
3025     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3026                                 AllCallSitesKnown))
3027       return indicatePessimisticFixpoint();
3028 
3029     return ChangeStatus::UNCHANGED;
3030   }
3031 
3032   /// See AbstractAttribute::manifest(...).
3033   ChangeStatus manifest(Attributor &A) override {
3034     // TODO: Rewrite the signature to return void?
3035     bool AnyChange = false;
3036     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3037     auto RetInstPred = [&](Instruction &I) {
3038       ReturnInst &RI = cast<ReturnInst>(I);
3039       if (!isa<UndefValue>(RI.getReturnValue()))
3040         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3041       return true;
3042     };
3043     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3044     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3045   }
3046 
3047   /// See AbstractAttribute::trackStatistics()
3048   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3049 };
3050 
3051 struct AAIsDeadFunction : public AAIsDead {
3052   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3053 
3054   /// See AbstractAttribute::initialize(...).
3055   void initialize(Attributor &A) override {
3056     const Function *F = getAnchorScope();
3057     if (F && !F->isDeclaration()) {
3058       // We only want to compute liveness once. If the function is not part of
3059       // the SCC, skip it.
3060       if (A.isRunOn(*const_cast<Function *>(F))) {
3061         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3062         assumeLive(A, F->getEntryBlock());
3063       } else {
3064         indicatePessimisticFixpoint();
3065       }
3066     }
3067   }
3068 
3069   /// See AbstractAttribute::getAsStr().
3070   const std::string getAsStr() const override {
3071     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3072            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3073            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3074            std::to_string(KnownDeadEnds.size()) + "]";
3075   }
3076 
3077   /// See AbstractAttribute::manifest(...).
3078   ChangeStatus manifest(Attributor &A) override {
3079     assert(getState().isValidState() &&
3080            "Attempted to manifest an invalid state!");
3081 
3082     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3083     Function &F = *getAnchorScope();
3084 
3085     if (AssumedLiveBlocks.empty()) {
3086       A.deleteAfterManifest(F);
3087       return ChangeStatus::CHANGED;
3088     }
3089 
3090     // Flag to determine if we can change an invoke to a call assuming the
3091     // callee is nounwind. This is not possible if the personality of the
3092     // function allows catching asynchronous exceptions.
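    // For example, a call site known not to return makes everything after it
    // dead: an invoke is registered below so its dead successor(s) can be
    // handled, while for a plain call the following instruction is turned into
    // an unreachable.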
3093     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3094 
3095     KnownDeadEnds.set_union(ToBeExploredFrom);
3096     for (const Instruction *DeadEndI : KnownDeadEnds) {
3097       auto *CB = dyn_cast<CallBase>(DeadEndI);
3098       if (!CB)
3099         continue;
3100       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3101           *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true,
3102           DepClassTy::OPTIONAL);
3103       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3104       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3105         continue;
3106 
3107       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3108         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3109       else
3110         A.changeToUnreachableAfterManifest(
3111             const_cast<Instruction *>(DeadEndI->getNextNode()));
3112       HasChanged = ChangeStatus::CHANGED;
3113     }
3114 
3115     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3116     for (BasicBlock &BB : F)
3117       if (!AssumedLiveBlocks.count(&BB)) {
3118         A.deleteAfterManifest(BB);
3119         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3120       }
3121 
3122     return HasChanged;
3123   }
3124 
3125   /// See AbstractAttribute::updateImpl(...).
3126   ChangeStatus updateImpl(Attributor &A) override;
3127 
3128   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3129     return !AssumedLiveEdges.count(std::make_pair(From, To));
3130   }
3131 
3132   /// See AbstractAttribute::trackStatistics()
3133   void trackStatistics() const override {}
3134 
3135   /// Returns true if the function is assumed dead.
3136   bool isAssumedDead() const override { return false; }
3137 
3138   /// See AAIsDead::isKnownDead().
3139   bool isKnownDead() const override { return false; }
3140 
3141   /// See AAIsDead::isAssumedDead(BasicBlock *).
3142   bool isAssumedDead(const BasicBlock *BB) const override {
3143     assert(BB->getParent() == getAnchorScope() &&
3144            "BB must be in the same anchor scope function.");
3145 
3146     if (!getAssumed())
3147       return false;
3148     return !AssumedLiveBlocks.count(BB);
3149   }
3150 
3151   /// See AAIsDead::isKnownDead(BasicBlock *).
3152   bool isKnownDead(const BasicBlock *BB) const override {
3153     return getKnown() && isAssumedDead(BB);
3154   }
3155 
3156   /// See AAIsDead::isAssumedDead(Instruction *I).
3157   bool isAssumedDead(const Instruction *I) const override {
3158     assert(I->getParent()->getParent() == getAnchorScope() &&
3159            "Instruction must be in the same anchor scope function.");
3160 
3161     if (!getAssumed())
3162       return false;
3163 
3164     // If it is not in AssumedLiveBlocks then it is for sure dead.
3165     // Otherwise, it can still be after a noreturn call in a live block.
3166     if (!AssumedLiveBlocks.count(I->getParent()))
3167       return true;
3168 
3169     // If it is not after a liveness barrier it is live.
3170     const Instruction *PrevI = I->getPrevNode();
3171     while (PrevI) {
3172       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3173         return true;
3174       PrevI = PrevI->getPrevNode();
3175     }
3176     return false;
3177   }
3178 
3179   /// See AAIsDead::isKnownDead(Instruction *I).
3180   bool isKnownDead(const Instruction *I) const override {
3181     return getKnown() && isAssumedDead(I);
3182   }
3183 
3184   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3185   /// that internal functions called from \p BB should now be looked at.
3186   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3187     if (!AssumedLiveBlocks.insert(&BB).second)
3188       return false;
3189 
3190     // We assume that all of BB is (probably) live now and if there are calls to
3191     // internal functions we will assume that those are now live as well. This
3192     // is a performance optimization for blocks with calls to a lot of internal
3193     // functions. It can however cause dead functions to be treated as live.
3194     for (const Instruction &I : BB)
3195       if (const auto *CB = dyn_cast<CallBase>(&I))
3196         if (const Function *F = CB->getCalledFunction())
3197           if (F->hasLocalLinkage())
3198             A.markLiveInternalFunction(*F);
3199     return true;
3200   }
3201 
3202   /// Collection of instructions that need to be explored again, e.g., because
3203   /// we did assume they do not transfer control to (one of their) successors.
3204   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3205 
3206   /// Collection of instructions that are known to not transfer control.
3207   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3208 
3209   /// Collection of all assumed live edges
3210   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3211 
3212   /// Collection of all assumed live BasicBlocks.
3213   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3214 };
3215 
3216 static bool
3217 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3218                         AbstractAttribute &AA,
3219                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3220   const IRPosition &IPos = IRPosition::callsite_function(CB);
3221 
3222   const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3223       AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3224   if (NoReturnAA.isAssumedNoReturn())
3225     return !NoReturnAA.isKnownNoReturn();
3226   if (CB.isTerminator())
3227     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3228   else
3229     AliveSuccessors.push_back(CB.getNextNode());
3230   return false;
3231 }
3232 
3233 static bool
3234 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3235                         AbstractAttribute &AA,
3236                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3237   bool UsedAssumedInformation =
3238       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3239 
3240   // First, determine if we can change an invoke to a call assuming the
3241   // callee is nounwind. This is not possible if the personality of the
3242   // function allows catching asynchronous exceptions.
3243   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3244     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3245   } else {
3246     const IRPosition &IPos = IRPosition::callsite_function(II);
3247     const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>(
3248         AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3249     if (AANoUnw.isAssumedNoUnwind()) {
3250       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3251     } else {
3252       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3253     }
3254   }
3255   return UsedAssumedInformation;
3256 }
3257 
3258 static bool
3259 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3260                         AbstractAttribute &AA,
3261                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3262   bool UsedAssumedInformation = false;
3263   if (BI.getNumSuccessors() == 1) {
3264     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3265   } else {
3266     Optional<ConstantInt *> CI = getAssumedConstantInt(
3267         A, *BI.getCondition(), AA, UsedAssumedInformation);
3268     if (!CI.hasValue()) {
3269       // No value yet, assume both edges are dead.
3270     } else if (CI.getValue()) {
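      // Successor 0 is the "true" destination, so a constant condition C
      // selects successor (1 - C).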
3271       const BasicBlock *SuccBB =
3272           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3273       AliveSuccessors.push_back(&SuccBB->front());
3274     } else {
3275       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3276       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3277       UsedAssumedInformation = false;
3278     }
3279   }
3280   return UsedAssumedInformation;
3281 }
3282 
3283 static bool
3284 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3285                         AbstractAttribute &AA,
3286                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3287   bool UsedAssumedInformation = false;
3288   Optional<ConstantInt *> CI =
3289       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3290   if (!CI.hasValue()) {
3291     // No value yet, assume all edges are dead.
3292   } else if (CI.getValue()) {
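    // The condition is a known constant; only the matching case successor (or
    // the default destination if no case matches) is considered alive.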
3293     for (auto &CaseIt : SI.cases()) {
3294       if (CaseIt.getCaseValue() == CI.getValue()) {
3295         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3296         return UsedAssumedInformation;
3297       }
3298     }
3299     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3300     return UsedAssumedInformation;
3301   } else {
3302     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3303       AliveSuccessors.push_back(&SuccBB->front());
3304   }
3305   return UsedAssumedInformation;
3306 }
3307 
3308 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3309   ChangeStatus Change = ChangeStatus::UNCHANGED;
3310 
3311   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3312                     << getAnchorScope()->size() << "] BBs and "
3313                     << ToBeExploredFrom.size() << " exploration points and "
3314                     << KnownDeadEnds.size() << " known dead ends\n");
3315 
3316   // Copy and clear the list of instructions we need to explore from. It is
3317   // refilled with instructions the next update has to look at.
3318   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3319                                                ToBeExploredFrom.end());
3320   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3321 
3322   SmallVector<const Instruction *, 8> AliveSuccessors;
3323   while (!Worklist.empty()) {
3324     const Instruction *I = Worklist.pop_back_val();
3325     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3326 
3327     // Fast forward for uninteresting instructions. We could look for UB here
3328     // though.
3329     while (!I->isTerminator() && !isa<CallBase>(I)) {
3330       Change = ChangeStatus::CHANGED;
3331       I = I->getNextNode();
3332     }
3333 
3334     AliveSuccessors.clear();
3335 
3336     bool UsedAssumedInformation = false;
3337     switch (I->getOpcode()) {
3338     // TODO: look for (assumed) UB to backwards propagate "deadness".
3339     default:
3340       assert(I->isTerminator() &&
3341              "Expected non-terminators to be handled already!");
3342       for (const BasicBlock *SuccBB : successors(I->getParent()))
3343         AliveSuccessors.push_back(&SuccBB->front());
3344       break;
3345     case Instruction::Call:
3346       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3347                                                        *this, AliveSuccessors);
3348       break;
3349     case Instruction::Invoke:
3350       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3351                                                        *this, AliveSuccessors);
3352       break;
3353     case Instruction::Br:
3354       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3355                                                        *this, AliveSuccessors);
3356       break;
3357     case Instruction::Switch:
3358       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3359                                                        *this, AliveSuccessors);
3360       break;
3361     }
3362 
3363     if (UsedAssumedInformation) {
3364       NewToBeExploredFrom.insert(I);
3365     } else {
3366       Change = ChangeStatus::CHANGED;
3367       if (AliveSuccessors.empty() ||
3368           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3369         KnownDeadEnds.insert(I);
3370     }
3371 
3372     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3373                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3374                       << UsedAssumedInformation << "\n");
3375 
3376     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3377       if (!I->isTerminator()) {
3378         assert(AliveSuccessors.size() == 1 &&
3379                "Non-terminator expected to have a single successor!");
3380         Worklist.push_back(AliveSuccessor);
3381       } else {
3382         // Record the assumed live edge.
3383         AssumedLiveEdges.insert(
3384             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3385         if (assumeLive(A, *AliveSuccessor->getParent()))
3386           Worklist.push_back(AliveSuccessor);
3387       }
3388     }
3389   }
3390 
3391   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3392 
3393   // If we know everything is live there is no need to query for liveness.
3394   // Instead, indicating a pessimistic fixpoint will cause the state to be
3395   // "invalid" and all queries to be answered conservatively without lookups.
3396   // To be in this state we have to (1) have finished the exploration, (2) not
3397   // have ruled any unreachable code dead, and (3) not have discovered any
3398   // non-trivial dead end.
3399   if (ToBeExploredFrom.empty() &&
3400       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3401       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3402         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3403       }))
3404     return indicatePessimisticFixpoint();
3405   return Change;
3406 }
3407 
3408 /// Liveness information for a call site.
3409 struct AAIsDeadCallSite final : AAIsDeadFunction {
3410   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3411       : AAIsDeadFunction(IRP, A) {}
3412 
3413   /// See AbstractAttribute::initialize(...).
3414   void initialize(Attributor &A) override {
3415     // TODO: Once we have call site specific value information we can provide
3416     //       call site specific liveness information and then it makes
3417     //       sense to specialize attributes for call sites instead of
3418     //       redirecting requests to the callee.
3419     llvm_unreachable("Abstract attributes for liveness are not "
3420                      "supported for call sites yet!");
3421   }
3422 
3423   /// See AbstractAttribute::updateImpl(...).
3424   ChangeStatus updateImpl(Attributor &A) override {
3425     return indicatePessimisticFixpoint();
3426   }
3427 
3428   /// See AbstractAttribute::trackStatistics()
3429   void trackStatistics() const override {}
3430 };
3431 
3432 /// -------------------- Dereferenceable Argument Attribute --------------------
3433 
3434 template <>
3435 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3436                                                      const DerefState &R) {
3437   ChangeStatus CS0 =
3438       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3439   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3440   return CS0 | CS1;
3441 }
3442 
3443 struct AADereferenceableImpl : AADereferenceable {
3444   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3445       : AADereferenceable(IRP, A) {}
3446   using StateType = DerefState;
3447 
3448   /// See AbstractAttribute::initialize(...).
3449   void initialize(Attributor &A) override {
3450     SmallVector<Attribute, 4> Attrs;
3451     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3452              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3453     for (const Attribute &Attr : Attrs)
3454       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3455 
3456     const IRPosition &IRP = this->getIRPosition();
3457     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP,
3458                                        /* TrackDependence */ false);
3459 
3460     bool CanBeNull;
3461     takeKnownDerefBytesMaximum(
3462         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3463             A.getDataLayout(), CanBeNull));
3464 
3465     bool IsFnInterface = IRP.isFnInterfaceKind();
3466     Function *FnScope = IRP.getAnchorScope();
3467     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3468       indicatePessimisticFixpoint();
3469       return;
3470     }
3471 
3472     if (Instruction *CtxI = getCtxI())
3473       followUsesInMBEC(*this, A, getState(), *CtxI);
3474   }
3475 
3476   /// See AbstractAttribute::getState()
3477   /// {
3478   StateType &getState() override { return *this; }
3479   const StateType &getState() const override { return *this; }
3480   /// }
3481 
3482   /// Helper function for collecting accessed bytes in must-be-executed-context
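  /// E.g., a 4-byte load at constant offset 8 from the associated value
  /// records the accessed byte range [8, 12).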
3483   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3484                               DerefState &State) {
3485     const Value *UseV = U->get();
3486     if (!UseV->getType()->isPointerTy())
3487       return;
3488 
3489     Type *PtrTy = UseV->getType();
3490     const DataLayout &DL = A.getDataLayout();
3491     int64_t Offset;
3492     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3493             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3494       if (Base == &getAssociatedValue() &&
3495           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3496         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3497         State.addAccessedBytes(Offset, Size);
3498       }
3499     }
3500     return;
3501   }
3502 
3503   /// See followUsesInMBEC
3504   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3505                        AADereferenceable::StateType &State) {
3506     bool IsNonNull = false;
3507     bool TrackUse = false;
3508     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3509         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3510     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3511                       << " for instruction " << *I << "\n");
3512 
3513     addAccessedBytesForUse(A, U, I, State);
3514     State.takeKnownDerefBytesMaximum(DerefBytes);
3515     return TrackUse;
3516   }
3517 
3518   /// See AbstractAttribute::manifest(...).
3519   ChangeStatus manifest(Attributor &A) override {
3520     ChangeStatus Change = AADereferenceable::manifest(A);
3521     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3522       removeAttrs({Attribute::DereferenceableOrNull});
3523       return ChangeStatus::CHANGED;
3524     }
3525     return Change;
3526   }
3527 
3528   void getDeducedAttributes(LLVMContext &Ctx,
3529                             SmallVectorImpl<Attribute> &Attrs) const override {
3530     // TODO: Add *_globally support
3531     if (isAssumedNonNull())
3532       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3533           Ctx, getAssumedDereferenceableBytes()));
3534     else
3535       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3536           Ctx, getAssumedDereferenceableBytes()));
3537   }
3538 
3539   /// See AbstractAttribute::getAsStr().
3540   const std::string getAsStr() const override {
3541     if (!getAssumedDereferenceableBytes())
3542       return "unknown-dereferenceable";
3543     return std::string("dereferenceable") +
3544            (isAssumedNonNull() ? "" : "_or_null") +
3545            (isAssumedGlobal() ? "_globally" : "") + "<" +
3546            std::to_string(getKnownDereferenceableBytes()) + "-" +
3547            std::to_string(getAssumedDereferenceableBytes()) + ">";
3548   }
3549 };
3550 
3551 /// Dereferenceable attribute for a floating value.
3552 struct AADereferenceableFloating : AADereferenceableImpl {
3553   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3554       : AADereferenceableImpl(IRP, A) {}
3555 
3556   /// See AbstractAttribute::updateImpl(...).
3557   ChangeStatus updateImpl(Attributor &A) override {
3558     const DataLayout &DL = A.getDataLayout();
3559 
3560     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3561                             bool Stripped) -> bool {
3562       unsigned IdxWidth =
3563           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3564       APInt Offset(IdxWidth, 0);
3565       const Value *Base =
3566           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3567 
3568       const auto &AA =
3569           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3570       int64_t DerefBytes = 0;
3571       if (!Stripped && this == &AA) {
3572         // Use IR information if we did not strip anything.
3573         // TODO: track globally.
3574         bool CanBeNull;
3575         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3576         T.GlobalState.indicatePessimisticFixpoint();
3577       } else {
3578         const DerefState &DS = AA.getState();
3579         DerefBytes = DS.DerefBytesState.getAssumed();
3580         T.GlobalState &= DS.GlobalState;
3581       }
3582 
3583       // For now we do not try to "increase" dereferenceability due to negative
3584       // indices as we first have to come up with code to deal with loops and
3585       // with overflows of the dereferenceable bytes.
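      // E.g., if the base is known dereferenceable(8) and the accumulated
      // offset is -4, we keep 8 assumed bytes rather than claiming 12.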
3586       int64_t OffsetSExt = Offset.getSExtValue();
3587       if (OffsetSExt < 0)
3588         OffsetSExt = 0;
3589 
3590       T.takeAssumedDerefBytesMinimum(
3591           std::max(int64_t(0), DerefBytes - OffsetSExt));
3592 
3593       if (this == &AA) {
3594         if (!Stripped) {
3595           // If nothing was stripped IR information is all we got.
3596           T.takeKnownDerefBytesMaximum(
3597               std::max(int64_t(0), DerefBytes - OffsetSExt));
3598           T.indicatePessimisticFixpoint();
3599         } else if (OffsetSExt > 0) {
3600           // If something was stripped but there is circular reasoning we look
3601           // at the offset. If it is positive we basically decrease the
3602           // dereferenceable bytes in a circular loop now, which will simply
3603           // drive them down to the known value in a very slow way which we
3604           // can accelerate.
3605           T.indicatePessimisticFixpoint();
3606         }
3607       }
3608 
3609       return T.isValidState();
3610     };
3611 
3612     DerefState T;
3613     if (!genericValueTraversal<AADereferenceable, DerefState>(
3614             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3615       return indicatePessimisticFixpoint();
3616 
3617     return clampStateAndIndicateChange(getState(), T);
3618   }
3619 
3620   /// See AbstractAttribute::trackStatistics()
3621   void trackStatistics() const override {
3622     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3623   }
3624 };
3625 
3626 /// Dereferenceable attribute for a return value.
3627 struct AADereferenceableReturned final
3628     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3629   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3630       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3631             IRP, A) {}
3632 
3633   /// See AbstractAttribute::trackStatistics()
3634   void trackStatistics() const override {
3635     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3636   }
3637 };
3638 
3639 /// Dereferenceable attribute for an argument
3640 struct AADereferenceableArgument final
3641     : AAArgumentFromCallSiteArguments<AADereferenceable,
3642                                       AADereferenceableImpl> {
3643   using Base =
3644       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3645   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3646       : Base(IRP, A) {}
3647 
3648   /// See AbstractAttribute::trackStatistics()
3649   void trackStatistics() const override {
3650     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3651   }
3652 };
3653 
3654 /// Dereferenceable attribute for a call site argument.
3655 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3656   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3657       : AADereferenceableFloating(IRP, A) {}
3658 
3659   /// See AbstractAttribute::trackStatistics()
3660   void trackStatistics() const override {
3661     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3662   }
3663 };
3664 
3665 /// Dereferenceable attribute deduction for a call site return value.
3666 struct AADereferenceableCallSiteReturned final
3667     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3668   using Base =
3669       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3670   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3671       : Base(IRP, A) {}
3672 
3673   /// See AbstractAttribute::trackStatistics()
3674   void trackStatistics() const override {
3675     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3676   }
3677 };
3678 
3679 // ------------------------ Align Argument Attribute ------------------------
3680 
3681 static unsigned getKnownAlignForUse(Attributor &A,
3682                                     AbstractAttribute &QueryingAA,
3683                                     Value &AssociatedValue, const Use *U,
3684                                     const Instruction *I, bool &TrackUse) {
3685   // We need to follow common pointer manipulation uses to the accesses they
3686   // feed into.
3687   if (isa<CastInst>(I)) {
3688     // Follow all but ptr2int casts.
3689     TrackUse = !isa<PtrToIntInst>(I);
3690     return 0;
3691   }
3692   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3693     if (GEP->hasAllConstantIndices()) {
3694       TrackUse = true;
3695       return 0;
3696     }
3697   }
3698 
3699   MaybeAlign MA;
3700   if (const auto *CB = dyn_cast<CallBase>(I)) {
3701     if (CB->isBundleOperand(U) || CB->isCallee(U))
3702       return 0;
3703 
3704     unsigned ArgNo = CB->getArgOperandNo(U);
3705     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3706     // As long as we only use known information there is no need to track
3707     // dependences here.
3708     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3709                                         /* TrackDependence */ false);
3710     MA = MaybeAlign(AlignAA.getKnownAlign());
3711   }
3712 
3713   const DataLayout &DL = A.getDataLayout();
3714   const Value *UseV = U->get();
3715   if (auto *SI = dyn_cast<StoreInst>(I)) {
3716     if (SI->getPointerOperand() == UseV)
3717       MA = SI->getAlign();
3718   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3719     if (LI->getPointerOperand() == UseV)
3720       MA = LI->getAlign();
3721   }
3722 
3723   if (!MA || *MA <= 1)
3724     return 0;
3725 
3726   unsigned Alignment = MA->value();
3727   int64_t Offset;
3728 
3729   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3730     if (Base == &AssociatedValue) {
3731       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3732       // So we can say that the maximum power of two which is a divisor of
3733       // gcd(Offset, Alignment) is an alignment.
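      // E.g., if the access at (Base + 4) is known to be 16-byte aligned, then
      // gcd(4, 16) = 4 and Base itself is known to be at least 4-byte aligned.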
3734 
3735       uint32_t gcd =
3736           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3737       Alignment = llvm::PowerOf2Floor(gcd);
3738     }
3739   }
3740 
3741   return Alignment;
3742 }
3743 
3744 struct AAAlignImpl : AAAlign {
3745   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3746 
3747   /// See AbstractAttribute::initialize(...).
3748   void initialize(Attributor &A) override {
3749     SmallVector<Attribute, 4> Attrs;
3750     getAttrs({Attribute::Alignment}, Attrs);
3751     for (const Attribute &Attr : Attrs)
3752       takeKnownMaximum(Attr.getValueAsInt());
3753 
3754     Value &V = getAssociatedValue();
3755     // TODO: This is a HACK to avoid getPointerAlignment introducing a ptr2int
3756     //       use of the function pointer. This was caused by D73131. We want to
3757     //       avoid this for function pointers especially because we iterate
3758     //       their uses and int2ptr is not handled. It is not a correctness
3759     //       problem though!
3760     if (!V.getType()->getPointerElementType()->isFunctionTy())
3761       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3762 
3763     if (getIRPosition().isFnInterfaceKind() &&
3764         (!getAnchorScope() ||
3765          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3766       indicatePessimisticFixpoint();
3767       return;
3768     }
3769 
3770     if (Instruction *CtxI = getCtxI())
3771       followUsesInMBEC(*this, A, getState(), *CtxI);
3772   }
3773 
3774   /// See AbstractAttribute::manifest(...).
3775   ChangeStatus manifest(Attributor &A) override {
3776     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3777 
3778     // Check for users that allow alignment annotations.
3779     Value &AssociatedValue = getAssociatedValue();
3780     for (const Use &U : AssociatedValue.uses()) {
3781       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3782         if (SI->getPointerOperand() == &AssociatedValue)
3783           if (SI->getAlignment() < getAssumedAlign()) {
3784             STATS_DECLTRACK(AAAlign, Store,
3785                             "Number of times alignment added to a store");
3786             SI->setAlignment(Align(getAssumedAlign()));
3787             LoadStoreChanged = ChangeStatus::CHANGED;
3788           }
3789       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3790         if (LI->getPointerOperand() == &AssociatedValue)
3791           if (LI->getAlignment() < getAssumedAlign()) {
3792             LI->setAlignment(Align(getAssumedAlign()));
3793             STATS_DECLTRACK(AAAlign, Load,
3794                             "Number of times alignment added to a load");
3795             LoadStoreChanged = ChangeStatus::CHANGED;
3796           }
3797       }
3798     }
3799 
3800     ChangeStatus Changed = AAAlign::manifest(A);
3801 
3802     Align InheritAlign =
3803         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3804     if (InheritAlign >= getAssumedAlign())
3805       return LoadStoreChanged;
3806     return Changed | LoadStoreChanged;
3807   }
3808 
3809   // TODO: Provide a helper to determine the implied ABI alignment and check it
3810   //       in the existing manifest method and in a new one for AAAlignImpl to
3811   //       avoid making the alignment explicit if it did not improve.
3812 
3813   /// See AbstractAttribute::getDeducedAttributes
3814   virtual void
3815   getDeducedAttributes(LLVMContext &Ctx,
3816                        SmallVectorImpl<Attribute> &Attrs) const override {
3817     if (getAssumedAlign() > 1)
3818       Attrs.emplace_back(
3819           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3820   }
3821 
3822   /// See followUsesInMBEC
3823   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3824                        AAAlign::StateType &State) {
3825     bool TrackUse = false;
3826 
3827     unsigned int KnownAlign =
3828         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3829     State.takeKnownMaximum(KnownAlign);
3830 
3831     return TrackUse;
3832   }
3833 
3834   /// See AbstractAttribute::getAsStr().
3835   const std::string getAsStr() const override {
3836     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3837                                 "-" + std::to_string(getAssumedAlign()) + ">")
3838                              : "unknown-align";
3839   }
3840 };
3841 
3842 /// Align attribute for a floating value.
3843 struct AAAlignFloating : AAAlignImpl {
3844   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3845 
3846   /// See AbstractAttribute::updateImpl(...).
3847   ChangeStatus updateImpl(Attributor &A) override {
3848     const DataLayout &DL = A.getDataLayout();
3849 
3850     auto VisitValueCB = [&](Value &V, const Instruction *,
3851                             AAAlign::StateType &T, bool Stripped) -> bool {
3852       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3853       if (!Stripped && this == &AA) {
3854         int64_t Offset;
3855         unsigned Alignment = 1;
3856         if (const Value *Base =
3857                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3858           Align PA = Base->getPointerAlignment(DL);
3859           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3860           // So we can say that the maximum power of two which is a divisor of
3861           // gcd(Offset, Alignment) is an alignment.
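          // E.g., a 16-byte aligned Base accessed at constant offset 4 yields
          // gcd(4, 16) = 4, i.e., V is known to be at least 4-byte aligned.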
3862 
3863           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3864                                                uint32_t(PA.value()));
3865           Alignment = llvm::PowerOf2Floor(gcd);
3866         } else {
3867           Alignment = V.getPointerAlignment(DL).value();
3868         }
3869         // Use only IR information if we did not strip anything.
3870         T.takeKnownMaximum(Alignment);
3871         T.indicatePessimisticFixpoint();
3872       } else {
3873         // Use abstract attribute information.
3874         const AAAlign::StateType &DS = AA.getState();
3875         T ^= DS;
3876       }
3877       return T.isValidState();
3878     };
3879 
3880     StateType T;
3881     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3882                                                    VisitValueCB, getCtxI()))
3883       return indicatePessimisticFixpoint();
3884 
3885     // TODO: If we know we visited all incoming values, and thus none are
3886     //       assumed dead, we can take the known information from the state T.
3887     return clampStateAndIndicateChange(getState(), T);
3888   }
3889 
3890   /// See AbstractAttribute::trackStatistics()
3891   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3892 };
3893 
3894 /// Align attribute for function return value.
3895 struct AAAlignReturned final
3896     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3897   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3898   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3899 
3900   /// See AbstractAttribute::initialize(...).
3901   void initialize(Attributor &A) override {
3902     Base::initialize(A);
3903     Function *F = getAssociatedFunction();
3904     if (!F || F->isDeclaration())
3905       indicatePessimisticFixpoint();
3906   }
3907 
3908   /// See AbstractAttribute::trackStatistics()
3909   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3910 };
3911 
3912 /// Align attribute for function argument.
3913 struct AAAlignArgument final
3914     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3915   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3916   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3917 
3918   /// See AbstractAttribute::manifest(...).
3919   ChangeStatus manifest(Attributor &A) override {
3920     // If the associated argument is involved in a must-tail call we give up
3921     // because we would need to keep the argument alignments of caller and
3922     // callee in-sync. Just does not seem worth the trouble right now.
3923     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3924       return ChangeStatus::UNCHANGED;
3925     return Base::manifest(A);
3926   }
3927 
3928   /// See AbstractAttribute::trackStatistics()
3929   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3930 };
3931 
3932 struct AAAlignCallSiteArgument final : AAAlignFloating {
3933   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3934       : AAAlignFloating(IRP, A) {}
3935 
3936   /// See AbstractAttribute::manifest(...).
3937   ChangeStatus manifest(Attributor &A) override {
3938     // If the associated argument is involved in a must-tail call we give up
3939     // because we would need to keep the argument alignments of caller and
3940     // callee in-sync. Just does not seem worth the trouble right now.
3941     if (Argument *Arg = getAssociatedArgument())
3942       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3943         return ChangeStatus::UNCHANGED;
3944     ChangeStatus Changed = AAAlignImpl::manifest(A);
3945     Align InheritAlign =
3946         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3947     if (InheritAlign >= getAssumedAlign())
3948       Changed = ChangeStatus::UNCHANGED;
3949     return Changed;
3950   }
3951 
3952   /// See AbstractAttribute::updateImpl(Attributor &A).
3953   ChangeStatus updateImpl(Attributor &A) override {
3954     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3955     if (Argument *Arg = getAssociatedArgument()) {
3956       // We only take known information from the argument
3957       // so we do not need to track a dependence.
3958       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3959           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3960       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3961     }
3962     return Changed;
3963   }
3964 
3965   /// See AbstractAttribute::trackStatistics()
3966   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3967 };
3968 
3969 /// Align attribute deduction for a call site return value.
3970 struct AAAlignCallSiteReturned final
3971     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3972   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3973   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3974       : Base(IRP, A) {}
3975 
3976   /// See AbstractAttribute::initialize(...).
3977   void initialize(Attributor &A) override {
3978     Base::initialize(A);
3979     Function *F = getAssociatedFunction();
3980     if (!F || F->isDeclaration())
3981       indicatePessimisticFixpoint();
3982   }
3983 
3984   /// See AbstractAttribute::trackStatistics()
3985   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3986 };
3987 
3988 /// ------------------ Function No-Return Attribute ----------------------------
3989 struct AANoReturnImpl : public AANoReturn {
3990   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3991 
3992   /// See AbstractAttribute::initialize(...).
3993   void initialize(Attributor &A) override {
3994     AANoReturn::initialize(A);
3995     Function *F = getAssociatedFunction();
3996     if (!F || F->isDeclaration())
3997       indicatePessimisticFixpoint();
3998   }
3999 
4000   /// See AbstractAttribute::getAsStr().
4001   const std::string getAsStr() const override {
4002     return getAssumed() ? "noreturn" : "may-return";
4003   }
4004 
4005   /// See AbstractAttribute::updateImpl(Attributor &A).
4006   virtual ChangeStatus updateImpl(Attributor &A) override {
4007     auto CheckForNoReturn = [](Instruction &) { return false; };
4008     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4009                                    {(unsigned)Instruction::Ret}))
4010       return indicatePessimisticFixpoint();
4011     return ChangeStatus::UNCHANGED;
4012   }
4013 };
4014 
4015 struct AANoReturnFunction final : AANoReturnImpl {
4016   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4017       : AANoReturnImpl(IRP, A) {}
4018 
4019   /// See AbstractAttribute::trackStatistics()
4020   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4021 };
4022 
4023 /// NoReturn attribute deduction for a call site.
4024 struct AANoReturnCallSite final : AANoReturnImpl {
4025   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4026       : AANoReturnImpl(IRP, A) {}
4027 
4028   /// See AbstractAttribute::initialize(...).
4029   void initialize(Attributor &A) override {
4030     AANoReturnImpl::initialize(A);
4031     if (Function *F = getAssociatedFunction()) {
4032       const IRPosition &FnPos = IRPosition::function(*F);
4033       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
4034       if (!FnAA.isAssumedNoReturn())
4035         indicatePessimisticFixpoint();
4036     }
4037   }
4038 
4039   /// See AbstractAttribute::updateImpl(...).
4040   ChangeStatus updateImpl(Attributor &A) override {
4041     // TODO: Once we have call site specific value information we can provide
4042     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
4044     //       redirecting requests to the callee argument.
4045     Function *F = getAssociatedFunction();
4046     const IRPosition &FnPos = IRPosition::function(*F);
4047     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
4048     return clampStateAndIndicateChange(getState(), FnAA.getState());
4049   }
4050 
4051   /// See AbstractAttribute::trackStatistics()
4052   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4053 };
4054 
4055 /// ----------------------- Variable Capturing ---------------------------------
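//
// Illustrative (hypothetical) example: in
//   define void @f(i8* %p) {
//     %v = load i8, i8* %p
//     ret void
//   }
// the pointer %p is neither stored to memory, returned, nor converted to an
// integer, so the argument can be marked `nocapture`.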
4056 
/// A class to hold the state for the no-capture attribute.
4058 struct AANoCaptureImpl : public AANoCapture {
4059   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4060 
4061   /// See AbstractAttribute::initialize(...).
4062   void initialize(Attributor &A) override {
4063     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4064       indicateOptimisticFixpoint();
4065       return;
4066     }
4067     Function *AnchorScope = getAnchorScope();
4068     if (isFnInterfaceKind() &&
4069         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4070       indicatePessimisticFixpoint();
4071       return;
4072     }
4073 
4074     // You cannot "capture" null in the default address space.
4075     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4076         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4077       indicateOptimisticFixpoint();
4078       return;
4079     }
4080 
4081     const Function *F =
4082         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4083 
4084     // Check what state the associated function can actually capture.
4085     if (F)
4086       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4087     else
4088       indicatePessimisticFixpoint();
4089   }
4090 
4091   /// See AbstractAttribute::updateImpl(...).
4092   ChangeStatus updateImpl(Attributor &A) override;
4093 
  /// See AbstractAttribute::getDeducedAttributes(...).
4095   virtual void
4096   getDeducedAttributes(LLVMContext &Ctx,
4097                        SmallVectorImpl<Attribute> &Attrs) const override {
4098     if (!isAssumedNoCaptureMaybeReturned())
4099       return;
4100 
4101     if (isArgumentPosition()) {
4102       if (isAssumedNoCapture())
4103         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4104       else if (ManifestInternal)
4105         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4106     }
4107   }
4108 
4109   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4110   /// depending on the ability of the function associated with \p IRP to capture
4111   /// state in memory and through "returning/throwing", respectively.
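  ///
  /// For example, a `readonly`, `nothrow` function that returns `void` cannot
  /// capture a pointer argument at all, while a `readonly` function that may
  /// return a value can still leak information derived from the pointer
  /// through its return value.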
4112   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4113                                                    const Function &F,
4114                                                    BitIntegerState &State) {
4115     // TODO: Once we have memory behavior attributes we should use them here.
4116 
4117     // If we know we cannot communicate or write to memory, we do not care about
4118     // ptr2int anymore.
4119     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4120         F.getReturnType()->isVoidTy()) {
4121       State.addKnownBits(NO_CAPTURE);
4122       return;
4123     }
4124 
    // A function cannot capture state in memory if it only reads memory; it
    // can, however, return/throw state and the state might be influenced by
    // the pointer value, e.g., loading from a returned pointer might reveal a
    // bit.
4128     if (F.onlyReadsMemory())
4129       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4130 
    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
4133     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4134       State.addKnownBits(NOT_CAPTURED_IN_RET);
4135 
4136     // Check existing "returned" attributes.
4137     int ArgNo = IRP.getCalleeArgNo();
4138     if (F.doesNotThrow() && ArgNo >= 0) {
4139       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4140         if (F.hasParamAttribute(u, Attribute::Returned)) {
4141           if (u == unsigned(ArgNo))
4142             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4143           else if (F.onlyReadsMemory())
4144             State.addKnownBits(NO_CAPTURE);
4145           else
4146             State.addKnownBits(NOT_CAPTURED_IN_RET);
4147           break;
4148         }
4149     }
4150   }
4151 
4152   /// See AbstractState::getAsStr().
4153   const std::string getAsStr() const override {
4154     if (isKnownNoCapture())
4155       return "known not-captured";
4156     if (isAssumedNoCapture())
4157       return "assumed not-captured";
4158     if (isKnownNoCaptureMaybeReturned())
4159       return "known not-captured-maybe-returned";
4160     if (isAssumedNoCaptureMaybeReturned())
4161       return "assumed not-captured-maybe-returned";
4162     return "assumed-captured";
4163   }
4164 };
4165 
4166 /// Attributor-aware capture tracker.
4167 struct AACaptureUseTracker final : public CaptureTracker {
4168 
4169   /// Create a capture tracker that can lookup in-flight abstract attributes
4170   /// through the Attributor \p A.
4171   ///
  /// If a use leads to a potential capture in memory, the corresponding bit is
  /// removed from \p State and the search is stopped. If a use leads to a
  /// return instruction, only the "captured in return" bit of \p State is
  /// removed. If a use leads to a ptr2int which may capture the value, the
  /// "captured in integer" bit is removed. If a use is found that is currently
  /// assumed "no-capture-maybe-returned", the user is added to the
  /// \p PotentialCopies set. All values in \p PotentialCopies are later tracked
  /// as well. For every explored use we decrement \p RemainingUsesToExplore.
  /// Once it reaches 0, the search is stopped and \p State is conservatively
  /// updated to assume all forms of capture.
4182   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4183                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4184                       SmallVectorImpl<const Value *> &PotentialCopies,
4185                       unsigned &RemainingUsesToExplore)
4186       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4187         PotentialCopies(PotentialCopies),
4188         RemainingUsesToExplore(RemainingUsesToExplore) {}
4189 
  /// Determine if \p V may be captured. *Also updates the state!*
4191   bool valueMayBeCaptured(const Value *V) {
4192     if (V->getType()->isPointerTy()) {
4193       PointerMayBeCaptured(V, this);
4194     } else {
4195       State.indicatePessimisticFixpoint();
4196     }
4197     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4198   }
4199 
4200   /// See CaptureTracker::tooManyUses().
4201   void tooManyUses() override {
4202     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4203   }
4204 
4205   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4206     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4207       return true;
4208     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4209         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
4210         DepClassTy::OPTIONAL);
4211     return DerefAA.getAssumedDereferenceableBytes();
4212   }
4213 
4214   /// See CaptureTracker::captured(...).
4215   bool captured(const Use *U) override {
4216     Instruction *UInst = cast<Instruction>(U->getUser());
4217     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4218                       << "\n");
4219 
4220     // Because we may reuse the tracker multiple times we keep track of the
4221     // number of explored uses ourselves as well.
4222     if (RemainingUsesToExplore-- == 0) {
4223       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4224       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4225                           /* Return */ true);
4226     }
4227 
4228     // Deal with ptr2int by following uses.
4229     if (isa<PtrToIntInst>(UInst)) {
4230       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4231       return valueMayBeCaptured(UInst);
4232     }
4233 
4234     // Explicitly catch return instructions.
4235     if (isa<ReturnInst>(UInst))
4236       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4237                           /* Return */ true);
4238 
4239     // For now we only use special logic for call sites. However, the tracker
4240     // itself knows about a lot of other non-capturing cases already.
4241     auto *CB = dyn_cast<CallBase>(UInst);
4242     if (!CB || !CB->isArgOperand(U))
4243       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4244                           /* Return */ true);
4245 
4246     unsigned ArgNo = CB->getArgOperandNo(U);
4247     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
    // it to justify a no-capture attribute here. This allows recursion!
4250     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4251     if (ArgNoCaptureAA.isAssumedNoCapture())
4252       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4253                           /* Return */ false);
4254     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4255       addPotentialCopy(*CB);
4256       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4257                           /* Return */ false);
4258     }
4259 
4260     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4261     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4262                         /* Return */ true);
4263   }
4264 
  /// Register \p CB as a potential copy of the value we are checking.
4266   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4267 
4268   /// See CaptureTracker::shouldExplore(...).
4269   bool shouldExplore(const Use *U) override {
4270     // Check liveness and ignore droppable users.
4271     return !U->getUser()->isDroppable() &&
4272            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4273   }
4274 
4275   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4276   /// \p CapturedInRet, then return the appropriate value for use in the
4277   /// CaptureTracker::captured() interface.
4278   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4279                     bool CapturedInRet) {
4280     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4281                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4282     if (CapturedInMem)
4283       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4284     if (CapturedInInt)
4285       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4286     if (CapturedInRet)
4287       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4288     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4289   }
4290 
4291 private:
4292   /// The attributor providing in-flight abstract attributes.
4293   Attributor &A;
4294 
4295   /// The abstract attribute currently updated.
4296   AANoCapture &NoCaptureAA;
4297 
4298   /// The abstract liveness state.
4299   const AAIsDead &IsDeadAA;
4300 
4301   /// The state currently updated.
4302   AANoCapture::StateType &State;
4303 
4304   /// Set of potential copies of the tracked value.
4305   SmallVectorImpl<const Value *> &PotentialCopies;
4306 
4307   /// Global counter to limit the number of explored uses.
4308   unsigned &RemainingUsesToExplore;
4309 };
4310 
4311 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4312   const IRPosition &IRP = getIRPosition();
4313   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4314                                         : &IRP.getAssociatedValue();
4315   if (!V)
4316     return indicatePessimisticFixpoint();
4317 
4318   const Function *F =
4319       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4320   assert(F && "Expected a function!");
4321   const IRPosition &FnPos = IRPosition::function(*F);
4322   const auto &IsDeadAA =
4323       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4324 
4325   AANoCapture::StateType T;
4326 
4327   // Readonly means we cannot capture through memory.
4328   const auto &FnMemAA =
4329       A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false);
4330   if (FnMemAA.isAssumedReadOnly()) {
4331     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4332     if (FnMemAA.isKnownReadOnly())
4333       addKnownBits(NOT_CAPTURED_IN_MEM);
4334     else
4335       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4336   }
4337 
  // Make sure all returned values are different from the underlying value.
4339   // TODO: we could do this in a more sophisticated way inside
4340   //       AAReturnedValues, e.g., track all values that escape through returns
4341   //       directly somehow.
4342   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4343     bool SeenConstant = false;
4344     for (auto &It : RVAA.returned_values()) {
4345       if (isa<Constant>(It.first)) {
4346         if (SeenConstant)
4347           return false;
4348         SeenConstant = true;
4349       } else if (!isa<Argument>(It.first) ||
4350                  It.first == getAssociatedArgument())
4351         return false;
4352     }
4353     return true;
4354   };
4355 
4356   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4357       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4358   if (NoUnwindAA.isAssumedNoUnwind()) {
4359     bool IsVoidTy = F->getReturnType()->isVoidTy();
4360     const AAReturnedValues *RVAA =
4361         IsVoidTy ? nullptr
4362                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4363                                                  /* TrackDependence */ true,
4364                                                  DepClassTy::OPTIONAL);
4365     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4366       T.addKnownBits(NOT_CAPTURED_IN_RET);
4367       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4368         return ChangeStatus::UNCHANGED;
4369       if (NoUnwindAA.isKnownNoUnwind() &&
4370           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4371         addKnownBits(NOT_CAPTURED_IN_RET);
4372         if (isKnown(NOT_CAPTURED_IN_MEM))
4373           return indicateOptimisticFixpoint();
4374       }
4375     }
4376   }
4377 
4378   // Use the CaptureTracker interface and logic with the specialized tracker,
4379   // defined in AACaptureUseTracker, that can look at in-flight abstract
4380   // attributes and directly updates the assumed state.
4381   SmallVector<const Value *, 4> PotentialCopies;
4382   unsigned RemainingUsesToExplore =
4383       getDefaultMaxUsesToExploreForCaptureTracking();
4384   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4385                               RemainingUsesToExplore);
4386 
4387   // Check all potential copies of the associated value until we can assume
4388   // none will be captured or we have to assume at least one might be.
4389   unsigned Idx = 0;
4390   PotentialCopies.push_back(V);
4391   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4392     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4393 
4394   AANoCapture::StateType &S = getState();
4395   auto Assumed = S.getAssumed();
4396   S.intersectAssumedBits(T.getAssumed());
4397   if (!isAssumedNoCaptureMaybeReturned())
4398     return indicatePessimisticFixpoint();
4399   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4400                                    : ChangeStatus::CHANGED;
4401 }
4402 
4403 /// NoCapture attribute for function arguments.
4404 struct AANoCaptureArgument final : AANoCaptureImpl {
4405   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4406       : AANoCaptureImpl(IRP, A) {}
4407 
4408   /// See AbstractAttribute::trackStatistics()
4409   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4410 };
4411 
4412 /// NoCapture attribute for call site arguments.
4413 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4414   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4415       : AANoCaptureImpl(IRP, A) {}
4416 
4417   /// See AbstractAttribute::initialize(...).
4418   void initialize(Attributor &A) override {
4419     if (Argument *Arg = getAssociatedArgument())
4420       if (Arg->hasByValAttr())
4421         indicateOptimisticFixpoint();
4422     AANoCaptureImpl::initialize(A);
4423   }
4424 
4425   /// See AbstractAttribute::updateImpl(...).
4426   ChangeStatus updateImpl(Attributor &A) override {
4427     // TODO: Once we have call site specific value information we can provide
4428     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
4430     //       redirecting requests to the callee argument.
4431     Argument *Arg = getAssociatedArgument();
4432     if (!Arg)
4433       return indicatePessimisticFixpoint();
4434     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4435     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4436     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4437   }
4438 
4439   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nocapture)
  }
4441 };
4442 
4443 /// NoCapture attribute for floating values.
4444 struct AANoCaptureFloating final : AANoCaptureImpl {
4445   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4446       : AANoCaptureImpl(IRP, A) {}
4447 
4448   /// See AbstractAttribute::trackStatistics()
4449   void trackStatistics() const override {
4450     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4451   }
4452 };
4453 
4454 /// NoCapture attribute for function return value.
4455 struct AANoCaptureReturned final : AANoCaptureImpl {
4456   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4457       : AANoCaptureImpl(IRP, A) {
4458     llvm_unreachable("NoCapture is not applicable to function returns!");
4459   }
4460 
4461   /// See AbstractAttribute::initialize(...).
4462   void initialize(Attributor &A) override {
4463     llvm_unreachable("NoCapture is not applicable to function returns!");
4464   }
4465 
4466   /// See AbstractAttribute::updateImpl(...).
4467   ChangeStatus updateImpl(Attributor &A) override {
4468     llvm_unreachable("NoCapture is not applicable to function returns!");
4469   }
4470 
4471   /// See AbstractAttribute::trackStatistics()
4472   void trackStatistics() const override {}
4473 };
4474 
4475 /// NoCapture attribute deduction for a call site return value.
4476 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4477   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4478       : AANoCaptureImpl(IRP, A) {}
4479 
4480   /// See AbstractAttribute::trackStatistics()
4481   void trackStatistics() const override {
4482     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4483   }
4484 };
4485 
4486 /// ------------------ Value Simplify Attribute ----------------------------
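//
// Illustrative (hypothetical) example: if every call site passes the constant
// `i32 42` for an argument, AAValueSimplifyArgument records 42 as the
// simplified value and the manifest step replaces uses of the argument with
// that constant.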
4487 struct AAValueSimplifyImpl : AAValueSimplify {
4488   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4489       : AAValueSimplify(IRP, A) {}
4490 
4491   /// See AbstractAttribute::initialize(...).
4492   void initialize(Attributor &A) override {
4493     if (getAssociatedValue().getType()->isVoidTy())
4494       indicatePessimisticFixpoint();
4495   }
4496 
4497   /// See AbstractAttribute::getAsStr().
4498   const std::string getAsStr() const override {
4499     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4500                         : "not-simple";
4501   }
4502 
4503   /// See AbstractAttribute::trackStatistics()
4504   void trackStatistics() const override {}
4505 
4506   /// See AAValueSimplify::getAssumedSimplifiedValue()
4507   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4508     if (!getAssumed())
4509       return const_cast<Value *>(&getAssociatedValue());
4510     return SimplifiedAssociatedValue;
4511   }
4512 
  /// Helper function for querying AAValueSimplify and updating the candidate.
4514   /// \param QueryingValue Value trying to unify with SimplifiedValue
4515   /// \param AccumulatedSimplifiedValue Current simplification result.
4516   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4517                              Value &QueryingValue,
4518                              Optional<Value *> &AccumulatedSimplifiedValue) {
4519     // FIXME: Add a typecast support.
4520 
4521     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4522         QueryingAA, IRPosition::value(QueryingValue));
4523 
4524     Optional<Value *> QueryingValueSimplified =
4525         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4526 
4527     if (!QueryingValueSimplified.hasValue())
4528       return true;
4529 
4530     if (!QueryingValueSimplified.getValue())
4531       return false;
4532 
4533     Value &QueryingValueSimplifiedUnwrapped =
4534         *QueryingValueSimplified.getValue();
4535 
4536     if (AccumulatedSimplifiedValue.hasValue() &&
4537         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4538         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4539       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4540     if (AccumulatedSimplifiedValue.hasValue() &&
4541         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4542       return true;
4543 
4544     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4545                       << " is assumed to be "
4546                       << QueryingValueSimplifiedUnwrapped << "\n");
4547 
4548     AccumulatedSimplifiedValue = QueryingValueSimplified;
4549     return true;
4550   }
4551 
  /// Returns whether a candidate was found or not.
4553   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4554     if (!getAssociatedValue().getType()->isIntegerTy())
4555       return false;
4556 
4557     const auto &AA =
4558         A.getAAFor<AAType>(*this, getIRPosition(), /* TrackDependence */ false);
4559 
4560     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4561 
4562     if (!COpt.hasValue()) {
4563       SimplifiedAssociatedValue = llvm::None;
4564       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4565       return true;
4566     }
4567     if (auto *C = COpt.getValue()) {
4568       SimplifiedAssociatedValue = C;
4569       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4570       return true;
4571     }
4572     return false;
4573   }
4574 
4575   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4576     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4577       return true;
4578     if (askSimplifiedValueFor<AAPotentialValues>(A))
4579       return true;
4580     return false;
4581   }
4582 
4583   /// See AbstractAttribute::manifest(...).
4584   ChangeStatus manifest(Attributor &A) override {
4585     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4586 
4587     if (SimplifiedAssociatedValue.hasValue() &&
4588         !SimplifiedAssociatedValue.getValue())
4589       return Changed;
4590 
4591     Value &V = getAssociatedValue();
4592     auto *C = SimplifiedAssociatedValue.hasValue()
4593                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4594                   : UndefValue::get(V.getType());
4595     if (C) {
4596       // We can replace the AssociatedValue with the constant.
4597       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4598         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4599                           << " :: " << *this << "\n");
4600         if (A.changeValueAfterManifest(V, *C))
4601           Changed = ChangeStatus::CHANGED;
4602       }
4603     }
4604 
4605     return Changed | AAValueSimplify::manifest(A);
4606   }
4607 
4608   /// See AbstractState::indicatePessimisticFixpoint(...).
4609   ChangeStatus indicatePessimisticFixpoint() override {
    // NOTE: Associated value will be returned in a pessimistic fixpoint and is
    // regarded as known. That's why `indicateOptimisticFixpoint` is called.
4612     SimplifiedAssociatedValue = &getAssociatedValue();
4613     indicateOptimisticFixpoint();
4614     return ChangeStatus::CHANGED;
4615   }
4616 
4617 protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under the current assumption. If in the
  // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
  // returns the original associated value.
4622   Optional<Value *> SimplifiedAssociatedValue;
4623 };
4624 
4625 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4626   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4627       : AAValueSimplifyImpl(IRP, A) {}
4628 
4629   void initialize(Attributor &A) override {
4630     AAValueSimplifyImpl::initialize(A);
4631     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4632       indicatePessimisticFixpoint();
4633     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4634                  Attribute::StructRet, Attribute::Nest},
4635                 /* IgnoreSubsumingPositions */ true))
4636       indicatePessimisticFixpoint();
4637 
    // FIXME: This is a hack to prevent us from propagating function pointers in
4639     // the new pass manager CGSCC pass as it creates call edges the
4640     // CallGraphUpdater cannot handle yet.
4641     Value &V = getAssociatedValue();
4642     if (V.getType()->isPointerTy() &&
4643         V.getType()->getPointerElementType()->isFunctionTy() &&
4644         !A.isModulePass())
4645       indicatePessimisticFixpoint();
4646   }
4647 
4648   /// See AbstractAttribute::updateImpl(...).
4649   ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly; otherwise we would write
    // into the replaced value and not the copy that byval creates implicitly.
4652     Argument *Arg = getAssociatedArgument();
4653     if (Arg->hasByValAttr()) {
4654       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4655       //       there is no race by not copying a constant byval.
4656       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4657       if (!MemAA.isAssumedReadOnly())
4658         return indicatePessimisticFixpoint();
4659     }
4660 
4661     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4662 
4663     auto PredForCallSite = [&](AbstractCallSite ACS) {
4664       const IRPosition &ACSArgPos =
4665           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
      // Check if a corresponding argument was found or if it is not associated
      // (which can happen for callback calls).
4668       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4669         return false;
4670 
4671       // We can only propagate thread independent values through callbacks.
4672       // This is different to direct/indirect call sites because for them we
4673       // know the thread executing the caller and callee is the same. For
4674       // callbacks this is not guaranteed, thus a thread dependent value could
4675       // be different for the caller and callee, making it invalid to propagate.
4676       Value &ArgOp = ACSArgPos.getAssociatedValue();
4677       if (ACS.isCallbackCall())
4678         if (auto *C = dyn_cast<Constant>(&ArgOp))
4679           if (C->isThreadDependent())
4680             return false;
4681       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4682     };
4683 
4684     bool AllCallSitesKnown;
4685     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4686                                 AllCallSitesKnown))
4687       if (!askSimplifiedValueForOtherAAs(A))
4688         return indicatePessimisticFixpoint();
4689 
    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
4694   }
4695 
4696   /// See AbstractAttribute::trackStatistics()
4697   void trackStatistics() const override {
4698     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4699   }
4700 };
4701 
4702 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4703   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4704       : AAValueSimplifyImpl(IRP, A) {}
4705 
4706   /// See AbstractAttribute::updateImpl(...).
4707   ChangeStatus updateImpl(Attributor &A) override {
4708     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4709 
4710     auto PredForReturned = [&](Value &V) {
4711       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4712     };
4713 
4714     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4715       if (!askSimplifiedValueForOtherAAs(A))
4716         return indicatePessimisticFixpoint();
4717 
    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
4722   }
4723 
4724   ChangeStatus manifest(Attributor &A) override {
4725     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4726 
4727     if (SimplifiedAssociatedValue.hasValue() &&
4728         !SimplifiedAssociatedValue.getValue())
4729       return Changed;
4730 
4731     Value &V = getAssociatedValue();
4732     auto *C = SimplifiedAssociatedValue.hasValue()
4733                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4734                   : UndefValue::get(V.getType());
4735     if (C) {
4736       auto PredForReturned =
4737           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4738             // We can replace the AssociatedValue with the constant.
4739             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4740               return true;
4741 
4742             for (ReturnInst *RI : RetInsts) {
4743               if (RI->getFunction() != getAnchorScope())
4744                 continue;
4745               auto *RC = C;
4746               if (RC->getType() != RI->getReturnValue()->getType())
4747                 RC = ConstantExpr::getBitCast(RC,
4748                                               RI->getReturnValue()->getType());
4749               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4750                                 << " in " << *RI << " :: " << *this << "\n");
4751               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4752                 Changed = ChangeStatus::CHANGED;
4753             }
4754             return true;
4755           };
4756       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4757     }
4758 
4759     return Changed | AAValueSimplify::manifest(A);
4760   }
4761 
4762   /// See AbstractAttribute::trackStatistics()
4763   void trackStatistics() const override {
4764     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4765   }
4766 };
4767 
4768 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4769   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4770       : AAValueSimplifyImpl(IRP, A) {}
4771 
4772   /// See AbstractAttribute::initialize(...).
4773   void initialize(Attributor &A) override {
4774     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4775     //        Needs investigation.
4776     // AAValueSimplifyImpl::initialize(A);
4777     Value &V = getAnchorValue();
4778 
    // TODO: add other cases
4780     if (isa<Constant>(V))
4781       indicatePessimisticFixpoint();
4782   }
4783 
4784   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4785   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4786   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4787   /// updated and \p Changed is set appropriately.
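  ///
  /// Illustrative example: with a pointer %p that is assumed `nonnull`,
  ///   %c = icmp eq i8* %p, null   ; simplifies to `i1 false`
  ///   %c = icmp ne i8* %p, null   ; simplifies to `i1 true`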
4788   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4789                               ChangeStatus &Changed) {
4790     if (!ICmp)
4791       return false;
4792     if (!ICmp->isEquality())
4793       return false;
4794 
    // This is a comparison with == or !=. We check for nullptr now.
4796     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4797     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4798     if (!Op0IsNull && !Op1IsNull)
4799       return false;
4800 
4801     LLVMContext &Ctx = ICmp->getContext();
4802     // Check for `nullptr ==/!= nullptr` first:
4803     if (Op0IsNull && Op1IsNull) {
4804       Value *NewVal = ConstantInt::get(
4805           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4806       assert(!SimplifiedAssociatedValue.hasValue() &&
4807              "Did not expect non-fixed value for constant comparison");
4808       SimplifiedAssociatedValue = NewVal;
4809       indicateOptimisticFixpoint();
4810       Changed = ChangeStatus::CHANGED;
4811       return true;
4812     }
4813 
4814     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
4815     // non-nullptr operand and if we assume it's non-null we can conclude the
4816     // result of the comparison.
4817     assert((Op0IsNull || Op1IsNull) &&
4818            "Expected nullptr versus non-nullptr comparison at this point");
4819 
4820     // The index is the operand that we assume is not null.
4821     unsigned PtrIdx = Op0IsNull;
4822     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4823         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)));
4824     if (!PtrNonNullAA.isAssumedNonNull())
4825       return false;
4826 
4827     // The new value depends on the predicate, true for != and false for ==.
4828     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4829                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4830 
4831     assert((!SimplifiedAssociatedValue.hasValue() ||
4832             SimplifiedAssociatedValue == NewVal) &&
4833            "Did not expect to change value for zero-comparison");
4834 
4835     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4836     SimplifiedAssociatedValue = NewVal;
4837 
4838     if (PtrNonNullAA.isKnownNonNull())
4839       indicateOptimisticFixpoint();
4840 
    Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
4842     return true;
4843   }
4844 
4845   /// See AbstractAttribute::updateImpl(...).
4846   ChangeStatus updateImpl(Attributor &A) override {
4847     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4848 
4849     ChangeStatus Changed;
4850     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4851                                Changed))
4852       return Changed;
4853 
4854     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4855                             bool Stripped) -> bool {
4856       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4857       if (!Stripped && this == &AA) {
        // TODO: Look at the instruction and check recursively.
4859 
4860         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4861                           << "\n");
4862         return false;
4863       }
4864       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4865     };
4866 
4867     bool Dummy = false;
4868     if (!genericValueTraversal<AAValueSimplify, bool>(
4869             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4870             /* UseValueSimplify */ false))
4871       if (!askSimplifiedValueForOtherAAs(A))
4872         return indicatePessimisticFixpoint();
4873 

    // If a candidate was found in this update, return CHANGED.
    return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
4879   }
4880 
4881   /// See AbstractAttribute::trackStatistics()
4882   void trackStatistics() const override {
4883     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4884   }
4885 };
4886 
4887 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4888   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4889       : AAValueSimplifyImpl(IRP, A) {}
4890 
4891   /// See AbstractAttribute::initialize(...).
4892   void initialize(Attributor &A) override {
4893     SimplifiedAssociatedValue = &getAnchorValue();
4894     indicateOptimisticFixpoint();
4895   }
  /// See AbstractAttribute::updateImpl(...).
4897   ChangeStatus updateImpl(Attributor &A) override {
4898     llvm_unreachable(
4899         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4900   }
4901   /// See AbstractAttribute::trackStatistics()
4902   void trackStatistics() const override {
4903     STATS_DECLTRACK_FN_ATTR(value_simplify)
4904   }
4905 };
4906 
4907 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4908   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4909       : AAValueSimplifyFunction(IRP, A) {}
4910   /// See AbstractAttribute::trackStatistics()
4911   void trackStatistics() const override {
4912     STATS_DECLTRACK_CS_ATTR(value_simplify)
4913   }
4914 };
4915 
4916 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4917   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4918       : AAValueSimplifyReturned(IRP, A) {}
4919 
4920   /// See AbstractAttribute::manifest(...).
4921   ChangeStatus manifest(Attributor &A) override {
4922     return AAValueSimplifyImpl::manifest(A);
4923   }
4924 
4925   void trackStatistics() const override {
4926     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4927   }
4928 };
4929 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4930   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4931       : AAValueSimplifyFloating(IRP, A) {}
4932 
4933   /// See AbstractAttribute::manifest(...).
4934   ChangeStatus manifest(Attributor &A) override {
4935     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4936 
4937     if (SimplifiedAssociatedValue.hasValue() &&
4938         !SimplifiedAssociatedValue.getValue())
4939       return Changed;
4940 
4941     Value &V = getAssociatedValue();
4942     auto *C = SimplifiedAssociatedValue.hasValue()
4943                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4944                   : UndefValue::get(V.getType());
4945     if (C) {
4946       Use &U = cast<CallBase>(&getAnchorValue())
4947                    ->getArgOperandUse(getCallSiteArgNo());
4948       // We can replace the AssociatedValue with the constant.
4949       if (&V != C && V.getType() == C->getType()) {
4950         if (A.changeUseAfterManifest(U, *C))
4951           Changed = ChangeStatus::CHANGED;
4952       }
4953     }
4954 
4955     return Changed | AAValueSimplify::manifest(A);
4956   }
4957 
4958   void trackStatistics() const override {
4959     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4960   }
4961 };
4962 
4963 /// ----------------------- Heap-To-Stack Conversion ---------------------------
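//
// Illustrative (hypothetical) example: a small allocation that is provably
// freed on all paths
//   %p = call i8* @malloc(i64 16)
//   ...
//   call void @free(i8* %p)
// is rewritten into a stack allocation
//   %p = alloca i8, i64 16
// and the matching free call is removed.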
4964 struct AAHeapToStackImpl : public AAHeapToStack {
4965   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4966       : AAHeapToStack(IRP, A) {}
4967 
4968   const std::string getAsStr() const override {
4969     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4970   }
4971 
4972   ChangeStatus manifest(Attributor &A) override {
4973     assert(getState().isValidState() &&
4974            "Attempted to manifest an invalid state!");
4975 
4976     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4977     Function *F = getAnchorScope();
4978     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4979 
4980     for (Instruction *MallocCall : MallocCalls) {
4981       // This malloc cannot be replaced.
4982       if (BadMallocCalls.count(MallocCall))
4983         continue;
4984 
4985       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4986         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4987         A.deleteAfterManifest(*FreeCall);
4988         HasChanged = ChangeStatus::CHANGED;
4989       }
4990 
4991       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4992                         << "\n");
4993 
4994       Align Alignment;
4995       Constant *Size;
4996       if (isCallocLikeFn(MallocCall, TLI)) {
4997         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4998         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4999         APInt TotalSize = SizeT->getValue() * Num->getValue();
5000         Size =
5001             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
5002       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5003         Size = cast<ConstantInt>(MallocCall->getOperand(1));
5004         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5005                                    ->getValue()
5006                                    .getZExtValue())
5007                         .valueOrOne();
5008       } else {
5009         Size = cast<ConstantInt>(MallocCall->getOperand(0));
5010       }
5011 
5012       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5013       Instruction *AI =
5014           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5015                          "", MallocCall->getNextNode());
5016 
5017       if (AI->getType() != MallocCall->getType())
5018         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5019                              AI->getNextNode());
5020 
5021       A.changeValueAfterManifest(*MallocCall, *AI);
5022 
5023       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5024         auto *NBB = II->getNormalDest();
5025         BranchInst::Create(NBB, MallocCall->getParent());
5026         A.deleteAfterManifest(*MallocCall);
5027       } else {
5028         A.deleteAfterManifest(*MallocCall);
5029       }
5030 
5031       // Zero out the allocated memory if it was a calloc.
5032       if (isCallocLikeFn(MallocCall, TLI)) {
5033         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5034                                    AI->getNextNode());
5035         Value *Ops[] = {
5036             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5037             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5038 
5039         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5040         Module *M = F->getParent();
5041         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5042         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5043       }
5044       HasChanged = ChangeStatus::CHANGED;
5045     }
5046 
5047     return HasChanged;
5048   }
5049 
5050   /// Collection of all malloc calls in a function.
5051   SmallSetVector<Instruction *, 4> MallocCalls;
5052 
5053   /// Collection of malloc calls that cannot be converted.
5054   DenseSet<const Instruction *> BadMallocCalls;
5055 
5056   /// A map for each malloc call to the set of associated free calls.
5057   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5058 
5059   ChangeStatus updateImpl(Attributor &A) override;
5060 };
5061 
5062 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5063   const Function *F = getAnchorScope();
5064   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5065 
5066   MustBeExecutedContextExplorer &Explorer =
5067       A.getInfoCache().getMustBeExecutedContextExplorer();
5068 
5069   auto FreeCheck = [&](Instruction &I) {
5070     const auto &Frees = FreesForMalloc.lookup(&I);
5071     if (Frees.size() != 1)
5072       return false;
5073     Instruction *UniqueFree = *Frees.begin();
5074     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5075   };
5076 
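  // Check all (transitive) uses of the allocation: loads and stores into the
  // memory are fine, stores *of* the pointer let it escape, call site uses
  // must be assumed nocapture and nofree, and free calls are recorded so they
  // can be removed when the allocation is moved to the stack.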
5077   auto UsesCheck = [&](Instruction &I) {
5078     bool ValidUsesOnly = true;
5079     bool MustUse = true;
5080     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5081       Instruction *UserI = cast<Instruction>(U.getUser());
5082       if (isa<LoadInst>(UserI))
5083         return true;
5084       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5085         if (SI->getValueOperand() == U.get()) {
5086           LLVM_DEBUG(dbgs()
5087                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5088           ValidUsesOnly = false;
5089         } else {
5090           // A store into the malloc'ed memory is fine.
5091         }
5092         return true;
5093       }
5094       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5095         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5096           return true;
        // Record the free call for this allocation.
5098         if (isFreeCall(UserI, TLI)) {
5099           if (MustUse) {
5100             FreesForMalloc[&I].insert(UserI);
5101           } else {
5102             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5103                               << *UserI << "\n");
5104             ValidUsesOnly = false;
5105           }
5106           return true;
5107         }
5108 
5109         unsigned ArgNo = CB->getArgOperandNo(&U);
5110 
5111         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5112             *this, IRPosition::callsite_argument(*CB, ArgNo));
5113 
5114         // If a callsite argument use is nofree, we are fine.
5115         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5116             *this, IRPosition::callsite_argument(*CB, ArgNo));
5117 
5118         if (!NoCaptureAA.isAssumedNoCapture() ||
5119             !ArgNoFreeAA.isAssumedNoFree()) {
5120           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5121           ValidUsesOnly = false;
5122         }
5123         return true;
5124       }
5125 
5126       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5127           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5128         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5129         Follow = true;
5130         return true;
5131       }
      // Unknown user for which we cannot track uses further (in a way that
      // makes sense).
5134       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5135       ValidUsesOnly = false;
5136       return true;
5137     };
5138     A.checkForAllUses(Pred, *this, I);
5139     return ValidUsesOnly;
5140   };
5141 
5142   auto MallocCallocCheck = [&](Instruction &I) {
5143     if (BadMallocCalls.count(&I))
5144       return true;
5145 
5146     bool IsMalloc = isMallocLikeFn(&I, TLI);
5147     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5148     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5149     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5150       BadMallocCalls.insert(&I);
5151       return true;
5152     }
5153 
5154     if (IsMalloc) {
5155       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5156         if (Size->getValue().ule(MaxHeapToStackSize))
5157           if (UsesCheck(I) || FreeCheck(I)) {
5158             MallocCalls.insert(&I);
5159             return true;
5160           }
5161     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5162       // Only if the alignment and sizes are constant.
5163       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5164         if (Size->getValue().ule(MaxHeapToStackSize))
5165           if (UsesCheck(I) || FreeCheck(I)) {
5166             MallocCalls.insert(&I);
5167             return true;
5168           }
5169     } else if (IsCalloc) {
5170       bool Overflow = false;
5171       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5172         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5173           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5174                   .ule(MaxHeapToStackSize))
5175             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5176               MallocCalls.insert(&I);
5177               return true;
5178             }
5179     }
5180 
5181     BadMallocCalls.insert(&I);
5182     return true;
5183   };
5184 
5185   size_t NumBadMallocs = BadMallocCalls.size();
5186 
5187   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5188 
5189   if (NumBadMallocs != BadMallocCalls.size())
5190     return ChangeStatus::CHANGED;
5191 
5192   return ChangeStatus::UNCHANGED;
5193 }
5194 
5195 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5196   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5197       : AAHeapToStackImpl(IRP, A) {}
5198 
5199   /// See AbstractAttribute::trackStatistics().
5200   void trackStatistics() const override {
5201     STATS_DECL(
5202         MallocCalls, Function,
5203         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5204     for (auto *C : MallocCalls)
5205       if (!BadMallocCalls.count(C))
5206         ++BUILD_STAT_NAME(MallocCalls, Function);
5207   }
5208 };
5209 
5210 /// ----------------------- Privatizable Pointers ------------------------------
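//
// Illustrative (hypothetical) example: a `byval` pointer argument, or a
// pointer argument that is backed by an `alloca` at every (known) call site,
// can be replaced by a private stack copy in the callee once the function
// signature and all call sites are rewritten accordingly.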
5211 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5212   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5213       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5214 
5215   ChangeStatus indicatePessimisticFixpoint() override {
5216     AAPrivatizablePtr::indicatePessimisticFixpoint();
5217     PrivatizableType = nullptr;
5218     return ChangeStatus::CHANGED;
5219   }
5220 
  /// Identify the type we can choose for a private copy of the underlying
5222   /// argument. None means it is not clear yet, nullptr means there is none.
5223   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5224 
5225   /// Return a privatizable type that encloses both T0 and T1.
5226   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5227   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5228     if (!T0.hasValue())
5229       return T1;
5230     if (!T1.hasValue())
5231       return T0;
5232     if (T0 == T1)
5233       return T0;
5234     return nullptr;
5235   }
5236 
5237   Optional<Type *> getPrivatizableType() const override {
5238     return PrivatizableType;
5239   }
5240 
5241   const std::string getAsStr() const override {
5242     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5243   }
5244 
5245 protected:
5246   Optional<Type *> PrivatizableType;
5247 };
5248 
5249 // TODO: Do this for call site arguments (probably also other values) as well.
5250 
5251 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5252   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5253       : AAPrivatizablePtrImpl(IRP, A) {}
5254 
5255   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5256   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5257     // If this is a byval argument and we know all the call sites (so we can
5258     // rewrite them), there is no need to check them explicitly.
5259     bool AllCallSitesKnown;
5260     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5261         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5262                                true, AllCallSitesKnown))
5263       return getAssociatedValue().getType()->getPointerElementType();
5264 
5265     Optional<Type *> Ty;
5266     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5267 
    // Make sure the associated call site argument has the same type at all call
    // sites and that it is an allocation we know is safe to privatize; for now
    // that means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
    //       the type from that information instead. That is a little more
    //       involved and will be done in a follow up patch.
5274     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5275       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is not associated
      // (which can happen for callback calls).
5278       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5279         return false;
5280 
5281       // Check that all call sites agree on a type.
5282       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
5283       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5284 
5285       LLVM_DEBUG({
5286         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5287         if (CSTy.hasValue() && CSTy.getValue())
5288           CSTy.getValue()->print(dbgs());
5289         else if (CSTy.hasValue())
5290           dbgs() << "<nullptr>";
5291         else
5292           dbgs() << "<none>";
5293       });
5294 
5295       Ty = combineTypes(Ty, CSTy);
5296 
5297       LLVM_DEBUG({
5298         dbgs() << " : New Type: ";
5299         if (Ty.hasValue() && Ty.getValue())
5300           Ty.getValue()->print(dbgs());
5301         else if (Ty.hasValue())
5302           dbgs() << "<nullptr>";
5303         else
5304           dbgs() << "<none>";
5305         dbgs() << "\n";
5306       });
5307 
5308       return !Ty.hasValue() || Ty.getValue();
5309     };
5310 
5311     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5312       return nullptr;
5313     return Ty;
5314   }
5315 
5316   /// See AbstractAttribute::updateImpl(...).
5317   ChangeStatus updateImpl(Attributor &A) override {
5318     PrivatizableType = identifyPrivatizableType(A);
5319     if (!PrivatizableType.hasValue())
5320       return ChangeStatus::UNCHANGED;
5321     if (!PrivatizableType.getValue())
5322       return indicatePessimisticFixpoint();
5323 
5324     // The dependence is optional so we don't give up once we give up on the
5325     // alignment.
5326     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5327                         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5328 
5329     // Avoid arguments with padding for now.
5330     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5331         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5332                                                 A.getInfoCache().getDL())) {
5333       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5334       return indicatePessimisticFixpoint();
5335     }
5336 
5337     // Verify callee and caller agree on how the promoted argument would be
5338     // passed.
5339     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5340     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5341     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5342     Function &Fn = *getIRPosition().getAnchorScope();
5343     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5344     ArgsToPromote.insert(getAssociatedArgument());
5345     const auto *TTI =
5346         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5347     if (!TTI ||
5348         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5349             Fn, *TTI, ArgsToPromote, Dummy) ||
5350         ArgsToPromote.empty()) {
5351       LLVM_DEBUG(
5352           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5353                  << Fn.getName() << "\n");
5354       return indicatePessimisticFixpoint();
5355     }
5356 
5357     // Collect the types that will replace the privatizable type in the function
5358     // signature.
5359     SmallVector<Type *, 16> ReplacementTypes;
5360     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5361 
5362     // Register a rewrite of the argument.
5363     Argument *Arg = getAssociatedArgument();
5364     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5365       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5366       return indicatePessimisticFixpoint();
5367     }
5368 
5369     unsigned ArgNo = Arg->getArgNo();
5370 
    // Helper to check whether, for the given call site, the associated
    // argument is passed to a callback where the privatization would be
    // different.
5373     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5374       SmallVector<const Use *, 4> CallbackUses;
5375       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5376       for (const Use *U : CallbackUses) {
5377         AbstractCallSite CBACS(U);
5378         assert(CBACS && CBACS.isCallbackCall());
5379         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5380           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5381 
5382           LLVM_DEBUG({
5383             dbgs()
5384                 << "[AAPrivatizablePtr] Argument " << *Arg
5385                 << "check if can be privatized in the context of its parent ("
5386                 << Arg->getParent()->getName()
5387                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5388                    "callback ("
5389                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5390                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5391                 << CBACS.getCallArgOperand(CBArg) << " vs "
5392                 << CB.getArgOperand(ArgNo) << "\n"
5393                 << "[AAPrivatizablePtr] " << CBArg << " : "
5394                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5395           });
5396 
5397           if (CBArgNo != int(ArgNo))
5398             continue;
5399           const auto &CBArgPrivAA =
5400               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5401           if (CBArgPrivAA.isValidState()) {
5402             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5403             if (!CBArgPrivTy.hasValue())
5404               continue;
5405             if (CBArgPrivTy.getValue() == PrivatizableType)
5406               continue;
5407           }
5408 
5409           LLVM_DEBUG({
5410             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5411                    << " cannot be privatized in the context of its parent ("
5412                    << Arg->getParent()->getName()
5413                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5414                       "callback ("
5415                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5416                    << ").\n[AAPrivatizablePtr] for which the argument "
5417                       "privatization is not compatible.\n";
5418           });
5419           return false;
5420         }
5421       }
5422       return true;
5423     };
5424 
    // Helper to check whether, for the given call site, the associated
    // argument is passed to a direct call where the privatization would be
    // different.
5427     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5428       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5429       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5430       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5431              "Expected a direct call operand for callback call operand");
5432 
5433       LLVM_DEBUG({
5434         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5435                << " check if be privatized in the context of its parent ("
5436                << Arg->getParent()->getName()
5437                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5438                   "direct call of ("
5439                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5440                << ").\n";
5441       });
5442 
5443       Function *DCCallee = DC->getCalledFunction();
5444       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5445         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5446             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5447         if (DCArgPrivAA.isValidState()) {
5448           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5449           if (!DCArgPrivTy.hasValue())
5450             return true;
5451           if (DCArgPrivTy.getValue() == PrivatizableType)
5452             return true;
5453         }
5454       }
5455 
5456       LLVM_DEBUG({
5457         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5458                << " cannot be privatized in the context of its parent ("
5459                << Arg->getParent()->getName()
5460                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5461                   "direct call of ("
               << DCCallee->getName()
5463                << ").\n[AAPrivatizablePtr] for which the argument "
5464                   "privatization is not compatible.\n";
5465       });
5466       return false;
5467     };
5468 
5469     // Helper to check if the associated argument is used at the given abstract
5470     // call site in a way that is incompatible with the privatization assumed
5471     // here.
5472     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5473       if (ACS.isDirectCall())
5474         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5475       if (ACS.isCallbackCall())
5476         return IsCompatiblePrivArgOfDirectCS(ACS);
5477       return false;
5478     };
5479 
5480     bool AllCallSitesKnown;
5481     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5482                                 AllCallSitesKnown))
5483       return indicatePessimisticFixpoint();
5484 
5485     return ChangeStatus::UNCHANGED;
5486   }
5487 
  /// Given a type to privatize \p PrivType, collect the constituent types
  /// (which are used) in \p ReplacementTypes.
5490   static void
5491   identifyReplacementTypes(Type *PrivType,
5492                            SmallVectorImpl<Type *> &ReplacementTypes) {
5493     // TODO: For now we expand the privatization type to the fullest which can
5494     //       lead to dead arguments that need to be removed later.
5495     assert(PrivType && "Expected privatizable type!");
5496 
    // Traverse the type, extract constituent types at the outermost level.
5498     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5499       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5500         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5501     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5502       ReplacementTypes.append(PrivArrayType->getNumElements(),
5503                               PrivArrayType->getElementType());
5504     } else {
5505       ReplacementTypes.push_back(PrivType);
5506     }
5507   }
5508 
5509   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5510   /// The values needed are taken from the arguments of \p F starting at
5511   /// position \p ArgNo.
5512   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5513                                    unsigned ArgNo, Instruction &IP) {
5514     assert(PrivType && "Expected privatizable type!");
5515 
5516     IRBuilder<NoFolder> IRB(&IP);
5517     const DataLayout &DL = F.getParent()->getDataLayout();
5518 
5519     // Traverse the type, build GEPs and stores.
5520     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5521       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5522       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5523         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5524         Value *Ptr = constructPointer(
5525             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5526         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5527       }
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      Type *PointeeTy = PrivArrayType->getElementType();
      Type *PointeePtrTy = PointeeTy->getPointerTo();
      // Use the store size of the element type, not of the pointer type, so
      // the per-element offsets step over the array elements correctly.
      uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5531       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5532         Value *Ptr =
5533             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5534         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5535       }
5536     } else {
5537       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5538     }
5539   }
5540 
5541   /// Extract values from \p Base according to the type \p PrivType at the
5542   /// call position \p ACS. The values are appended to \p ReplacementValues.
5543   void createReplacementValues(Align Alignment, Type *PrivType,
5544                                AbstractCallSite ACS, Value *Base,
5545                                SmallVectorImpl<Value *> &ReplacementValues) {
5546     assert(Base && "Expected base value!");
5547     assert(PrivType && "Expected privatizable type!");
5548     Instruction *IP = ACS.getInstruction();
5549 
5550     IRBuilder<NoFolder> IRB(IP);
5551     const DataLayout &DL = IP->getModule()->getDataLayout();
5552 
5553     if (Base->getType()->getPointerElementType() != PrivType)
5554       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5555                                                  "", ACS.getInstruction());
5556 
5557     // Traverse the type, build GEPs and loads.
5558     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5559       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5560       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5561         Type *PointeeTy = PrivStructType->getElementType(u);
5562         Value *Ptr =
5563             constructPointer(PointeeTy->getPointerTo(), Base,
5564                              PrivStructLayout->getElementOffset(u), IRB, DL);
5565         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5566         L->setAlignment(Alignment);
5567         ReplacementValues.push_back(L);
5568       }
5569     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5570       Type *PointeeTy = PrivArrayType->getElementType();
5571       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5572       Type *PointeePtrTy = PointeeTy->getPointerTo();
5573       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5574         Value *Ptr =
5575             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
        // Load the element value, not a pointer to it.
        LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5577         L->setAlignment(Alignment);
5578         ReplacementValues.push_back(L);
5579       }
5580     } else {
5581       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5582       L->setAlignment(Alignment);
5583       ReplacementValues.push_back(L);
5584     }
5585   }
5586 
5587   /// See AbstractAttribute::manifest(...)
5588   ChangeStatus manifest(Attributor &A) override {
5589     if (!PrivatizableType.hasValue())
5590       return ChangeStatus::UNCHANGED;
5591     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5592 
    // Collect all tail calls in the function as we cannot allow new allocas
    // to escape into tail calls.
5595     // TODO: Be smarter about new allocas escaping into tail calls.
5596     SmallVector<CallInst *, 16> TailCalls;
5597     if (!A.checkForAllInstructions(
5598             [&](Instruction &I) {
5599               CallInst &CI = cast<CallInst>(I);
5600               if (CI.isTailCall())
5601                 TailCalls.push_back(&CI);
5602               return true;
5603             },
5604             *this, {Instruction::Call}))
5605       return ChangeStatus::UNCHANGED;
5606 
5607     Argument *Arg = getAssociatedArgument();
5608     // Query AAAlign attribute for alignment of associated argument to
5609     // determine the best alignment of loads.
5610     const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg));
5611 
5612     // Callback to repair the associated function. A new alloca is placed at the
5613     // beginning and initialized with the values passed through arguments. The
5614     // new alloca replaces the use of the old pointer argument.
5615     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5616         [=](const Attributor::ArgumentReplacementInfo &ARI,
5617             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5618           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5619           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5620           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5621                                     Arg->getName() + ".priv", IP);
5622           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5623                                ArgIt->getArgNo(), *IP);
5624           Arg->replaceAllUsesWith(AI);
5625 
5626           for (CallInst *CI : TailCalls)
5627             CI->setTailCall(false);
5628         };
5629 
5630     // Callback to repair a call site of the associated function. The elements
5631     // of the privatizable type are loaded prior to the call and passed to the
5632     // new function version.
5633     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5634         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5635                       AbstractCallSite ACS,
5636                       SmallVectorImpl<Value *> &NewArgOperands) {
5637           // When no alignment is specified for the load instruction,
5638           // natural alignment is assumed.
5639           createReplacementValues(
5640               assumeAligned(AlignAA.getAssumedAlign()),
5641               PrivatizableType.getValue(), ACS,
5642               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5643               NewArgOperands);
5644         };
5645 
5646     // Collect the types that will replace the privatizable type in the function
5647     // signature.
5648     SmallVector<Type *, 16> ReplacementTypes;
5649     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5650 
5651     // Register a rewrite of the argument.
5652     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5653                                            std::move(FnRepairCB),
5654                                            std::move(ACSRepairCB)))
5655       return ChangeStatus::CHANGED;
5656     return ChangeStatus::UNCHANGED;
5657   }
5658 
5659   /// See AbstractAttribute::trackStatistics()
5660   void trackStatistics() const override {
5661     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5662   }
5663 };
5664 
5665 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5666   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5667       : AAPrivatizablePtrImpl(IRP, A) {}
5668 
5669   /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
5671     // TODO: We can privatize more than arguments.
5672     indicatePessimisticFixpoint();
5673   }
5674 
5675   ChangeStatus updateImpl(Attributor &A) override {
5676     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5677                      "updateImpl will not be called");
5678   }
5679 
5680   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5681   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5682     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5683     if (!Obj) {
5684       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5685       return nullptr;
5686     }
5687 
5688     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5689       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5690         if (CI->isOne())
5691           return Obj->getType()->getPointerElementType();
5692     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5693       auto &PrivArgAA =
5694           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5695       if (PrivArgAA.isAssumedPrivatizablePtr())
5696         return Obj->getType()->getPointerElementType();
5697     }
5698 
5699     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5700                          "alloca nor privatizable argument: "
5701                       << *Obj << "!\n");
5702     return nullptr;
5703   }
5704 
5705   /// See AbstractAttribute::trackStatistics()
5706   void trackStatistics() const override {
5707     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5708   }
5709 };
5710 
5711 struct AAPrivatizablePtrCallSiteArgument final
5712     : public AAPrivatizablePtrFloating {
5713   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5714       : AAPrivatizablePtrFloating(IRP, A) {}
5715 
5716   /// See AbstractAttribute::initialize(...).
5717   void initialize(Attributor &A) override {
5718     if (getIRPosition().hasAttr(Attribute::ByVal))
5719       indicateOptimisticFixpoint();
5720   }
5721 
5722   /// See AbstractAttribute::updateImpl(...).
5723   ChangeStatus updateImpl(Attributor &A) override {
5724     PrivatizableType = identifyPrivatizableType(A);
5725     if (!PrivatizableType.hasValue())
5726       return ChangeStatus::UNCHANGED;
5727     if (!PrivatizableType.getValue())
5728       return indicatePessimisticFixpoint();
5729 
5730     const IRPosition &IRP = getIRPosition();
5731     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5732     if (!NoCaptureAA.isAssumedNoCapture()) {
5733       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5734       return indicatePessimisticFixpoint();
5735     }
5736 
5737     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5738     if (!NoAliasAA.isAssumedNoAlias()) {
5739       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5740       return indicatePessimisticFixpoint();
5741     }
5742 
5743     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5744     if (!MemBehaviorAA.isAssumedReadOnly()) {
5745       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5746       return indicatePessimisticFixpoint();
5747     }
5748 
5749     return ChangeStatus::UNCHANGED;
5750   }
5751 
5752   /// See AbstractAttribute::trackStatistics()
5753   void trackStatistics() const override {
5754     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5755   }
5756 };
5757 
5758 struct AAPrivatizablePtrCallSiteReturned final
5759     : public AAPrivatizablePtrFloating {
5760   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5761       : AAPrivatizablePtrFloating(IRP, A) {}
5762 
5763   /// See AbstractAttribute::initialize(...).
5764   void initialize(Attributor &A) override {
5765     // TODO: We can privatize more than arguments.
5766     indicatePessimisticFixpoint();
5767   }
5768 
5769   /// See AbstractAttribute::trackStatistics()
5770   void trackStatistics() const override {
5771     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5772   }
5773 };
5774 
5775 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5776   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5777       : AAPrivatizablePtrFloating(IRP, A) {}
5778 
5779   /// See AbstractAttribute::initialize(...).
5780   void initialize(Attributor &A) override {
5781     // TODO: We can privatize more than arguments.
5782     indicatePessimisticFixpoint();
5783   }
5784 
5785   /// See AbstractAttribute::trackStatistics()
5786   void trackStatistics() const override {
5787     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5788   }
5789 };
5790 
5791 /// -------------------- Memory Behavior Attributes ----------------------------
5792 /// Includes read-none, read-only, and write-only.
5793 /// ----------------------------------------------------------------------------
5794 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5795   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5796       : AAMemoryBehavior(IRP, A) {}
5797 
5798   /// See AbstractAttribute::initialize(...).
5799   void initialize(Attributor &A) override {
5800     intersectAssumedBits(BEST_STATE);
5801     getKnownStateFromValue(getIRPosition(), getState());
5802     AAMemoryBehavior::initialize(A);
5803   }
5804 
5805   /// Return the memory behavior information encoded in the IR for \p IRP.
5806   static void getKnownStateFromValue(const IRPosition &IRP,
5807                                      BitIntegerState &State,
5808                                      bool IgnoreSubsumingPositions = false) {
5809     SmallVector<Attribute, 2> Attrs;
5810     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5811     for (const Attribute &Attr : Attrs) {
5812       switch (Attr.getKindAsEnum()) {
5813       case Attribute::ReadNone:
5814         State.addKnownBits(NO_ACCESSES);
5815         break;
5816       case Attribute::ReadOnly:
5817         State.addKnownBits(NO_WRITES);
5818         break;
5819       case Attribute::WriteOnly:
5820         State.addKnownBits(NO_READS);
5821         break;
5822       default:
5823         llvm_unreachable("Unexpected attribute!");
5824       }
5825     }
5826 
5827     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5828       if (!I->mayReadFromMemory())
5829         State.addKnownBits(NO_READS);
5830       if (!I->mayWriteToMemory())
5831         State.addKnownBits(NO_WRITES);
5832     }
5833   }
5834 
5835   /// See AbstractAttribute::getDeducedAttributes(...).
5836   void getDeducedAttributes(LLVMContext &Ctx,
5837                             SmallVectorImpl<Attribute> &Attrs) const override {
5838     assert(Attrs.size() == 0);
5839     if (isAssumedReadNone())
5840       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5841     else if (isAssumedReadOnly())
5842       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5843     else if (isAssumedWriteOnly())
5844       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5845     assert(Attrs.size() <= 1);
5846   }
5847 
5848   /// See AbstractAttribute::manifest(...).
5849   ChangeStatus manifest(Attributor &A) override {
5850     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5851       return ChangeStatus::UNCHANGED;
5852 
5853     const IRPosition &IRP = getIRPosition();
5854 
5855     // Check if we would improve the existing attributes first.
5856     SmallVector<Attribute, 4> DeducedAttrs;
5857     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5858     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5859           return IRP.hasAttr(Attr.getKindAsEnum(),
5860                              /* IgnoreSubsumingPositions */ true);
5861         }))
5862       return ChangeStatus::UNCHANGED;
5863 
5864     // Clear existing attributes.
5865     IRP.removeAttrs(AttrKinds);
5866 
5867     // Use the generic manifest method.
5868     return IRAttribute::manifest(A);
5869   }
5870 
5871   /// See AbstractState::getAsStr().
5872   const std::string getAsStr() const override {
5873     if (isAssumedReadNone())
5874       return "readnone";
5875     if (isAssumedReadOnly())
5876       return "readonly";
5877     if (isAssumedWriteOnly())
5878       return "writeonly";
5879     return "may-read/write";
5880   }
5881 
5882   /// The set of IR attributes AAMemoryBehavior deals with.
5883   static const Attribute::AttrKind AttrKinds[3];
5884 };
5885 
5886 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5887     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5888 
5889 /// Memory behavior attribute for a floating value.
5890 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5891   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5892       : AAMemoryBehaviorImpl(IRP, A) {}
5893 
5894   /// See AbstractAttribute::initialize(...).
5895   void initialize(Attributor &A) override {
5896     AAMemoryBehaviorImpl::initialize(A);
5897     addUsesOf(A, getAssociatedValue());
5898   }
5899 
5900   /// See AbstractAttribute::updateImpl(...).
5901   ChangeStatus updateImpl(Attributor &A) override;
5902 
5903   /// See AbstractAttribute::trackStatistics()
5904   void trackStatistics() const override {
5905     if (isAssumedReadNone())
5906       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5907     else if (isAssumedReadOnly())
5908       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5909     else if (isAssumedWriteOnly())
5910       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5911   }
5912 
5913 private:
5914   /// Return true if users of \p UserI might access the underlying
5915   /// variable/location described by \p U and should therefore be analyzed.
5916   bool followUsersOfUseIn(Attributor &A, const Use *U,
5917                           const Instruction *UserI);
5918 
5919   /// Update the state according to the effect of use \p U in \p UserI.
5920   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5921 
5922 protected:
5923   /// Add the uses of \p V to the `Uses` set we look at during the update step.
5924   void addUsesOf(Attributor &A, const Value &V);
5925 
5926   /// Container for (transitive) uses of the associated argument.
5927   SmallVector<const Use *, 8> Uses;
5928 
5929   /// Set to remember the uses we already traversed.
5930   SmallPtrSet<const Use *, 8> Visited;
5931 };
5932 
5933 /// Memory behavior attribute for function argument.
5934 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5935   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5936       : AAMemoryBehaviorFloating(IRP, A) {}
5937 
5938   /// See AbstractAttribute::initialize(...).
5939   void initialize(Attributor &A) override {
5940     intersectAssumedBits(BEST_STATE);
5941     const IRPosition &IRP = getIRPosition();
5942     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5943     // can query it when we use has/getAttr. That would allow us to reuse the
5944     // initialize of the base class here.
5945     bool HasByVal =
5946         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5947     getKnownStateFromValue(IRP, getState(),
5948                            /* IgnoreSubsumingPositions */ HasByVal);
5949 
5950     // Initialize the use vector with all direct uses of the associated value.
5951     Argument *Arg = getAssociatedArgument();
5952     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5953       indicatePessimisticFixpoint();
5954     } else {
5955       addUsesOf(A, *Arg);
5956     }
5957   }
5958 
5959   ChangeStatus manifest(Attributor &A) override {
5960     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5961     if (!getAssociatedValue().getType()->isPointerTy())
5962       return ChangeStatus::UNCHANGED;
5963 
5964     // TODO: From readattrs.ll: "inalloca parameters are always
5965     //                           considered written"
5966     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
5967       removeKnownBits(NO_WRITES);
5968       removeAssumedBits(NO_WRITES);
5969     }
5970     return AAMemoryBehaviorFloating::manifest(A);
5971   }
5972 
5973   /// See AbstractAttribute::trackStatistics()
5974   void trackStatistics() const override {
5975     if (isAssumedReadNone())
5976       STATS_DECLTRACK_ARG_ATTR(readnone)
5977     else if (isAssumedReadOnly())
5978       STATS_DECLTRACK_ARG_ATTR(readonly)
5979     else if (isAssumedWriteOnly())
5980       STATS_DECLTRACK_ARG_ATTR(writeonly)
5981   }
5982 };
5983 
5984 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5985   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5986       : AAMemoryBehaviorArgument(IRP, A) {}
5987 
5988   /// See AbstractAttribute::initialize(...).
5989   void initialize(Attributor &A) override {
5990     // If we don't have an associated attribute this is either a variadic call
5991     // or an indirect call, either way, nothing to do here.
5992     Argument *Arg = getAssociatedArgument();
5993     if (!Arg) {
5994       indicatePessimisticFixpoint();
5995       return;
5996     }
5997     if (Arg->hasByValAttr()) {
5998       addKnownBits(NO_WRITES);
5999       removeKnownBits(NO_READS);
6000       removeAssumedBits(NO_READS);
6001     }
6002     AAMemoryBehaviorArgument::initialize(A);
6003     if (getAssociatedFunction()->isDeclaration())
6004       indicatePessimisticFixpoint();
6005   }
6006 
6007   /// See AbstractAttribute::updateImpl(...).
6008   ChangeStatus updateImpl(Attributor &A) override {
6009     // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
6012     //       redirecting requests to the callee argument.
6013     Argument *Arg = getAssociatedArgument();
6014     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6015     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
6016     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6017   }
6018 
6019   /// See AbstractAttribute::trackStatistics()
6020   void trackStatistics() const override {
6021     if (isAssumedReadNone())
6022       STATS_DECLTRACK_CSARG_ATTR(readnone)
6023     else if (isAssumedReadOnly())
6024       STATS_DECLTRACK_CSARG_ATTR(readonly)
6025     else if (isAssumedWriteOnly())
6026       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6027   }
6028 };
6029 
6030 /// Memory behavior attribute for a call site return position.
6031 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6032   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6033       : AAMemoryBehaviorFloating(IRP, A) {}
6034 
6035   /// See AbstractAttribute::initialize(...).
6036   void initialize(Attributor &A) override {
6037     AAMemoryBehaviorImpl::initialize(A);
6038     Function *F = getAssociatedFunction();
6039     if (!F || F->isDeclaration())
6040       indicatePessimisticFixpoint();
6041   }
6042 
6043   /// See AbstractAttribute::manifest(...).
6044   ChangeStatus manifest(Attributor &A) override {
6045     // We do not annotate returned values.
6046     return ChangeStatus::UNCHANGED;
6047   }
6048 
6049   /// See AbstractAttribute::trackStatistics()
6050   void trackStatistics() const override {}
6051 };
6052 
6053 /// An AA to represent the memory behavior function attributes.
6054 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6055   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6056       : AAMemoryBehaviorImpl(IRP, A) {}
6057 
6058   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;
6060 
6061   /// See AbstractAttribute::manifest(...).
6062   ChangeStatus manifest(Attributor &A) override {
6063     Function &F = cast<Function>(getAnchorValue());
6064     if (isAssumedReadNone()) {
6065       F.removeFnAttr(Attribute::ArgMemOnly);
6066       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6067       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6068     }
6069     return AAMemoryBehaviorImpl::manifest(A);
6070   }
6071 
6072   /// See AbstractAttribute::trackStatistics()
6073   void trackStatistics() const override {
6074     if (isAssumedReadNone())
6075       STATS_DECLTRACK_FN_ATTR(readnone)
6076     else if (isAssumedReadOnly())
6077       STATS_DECLTRACK_FN_ATTR(readonly)
6078     else if (isAssumedWriteOnly())
6079       STATS_DECLTRACK_FN_ATTR(writeonly)
6080   }
6081 };
6082 
6083 /// AAMemoryBehavior attribute for call sites.
6084 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6085   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6086       : AAMemoryBehaviorImpl(IRP, A) {}
6087 
6088   /// See AbstractAttribute::initialize(...).
6089   void initialize(Attributor &A) override {
6090     AAMemoryBehaviorImpl::initialize(A);
6091     Function *F = getAssociatedFunction();
6092     if (!F || F->isDeclaration())
6093       indicatePessimisticFixpoint();
6094   }
6095 
6096   /// See AbstractAttribute::updateImpl(...).
6097   ChangeStatus updateImpl(Attributor &A) override {
6098     // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
6101     //       redirecting requests to the callee argument.
6102     Function *F = getAssociatedFunction();
6103     const IRPosition &FnPos = IRPosition::function(*F);
6104     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
6105     return clampStateAndIndicateChange(getState(), FnAA.getState());
6106   }
6107 
6108   /// See AbstractAttribute::trackStatistics()
6109   void trackStatistics() const override {
6110     if (isAssumedReadNone())
6111       STATS_DECLTRACK_CS_ATTR(readnone)
6112     else if (isAssumedReadOnly())
6113       STATS_DECLTRACK_CS_ATTR(readonly)
6114     else if (isAssumedWriteOnly())
6115       STATS_DECLTRACK_CS_ATTR(writeonly)
6116   }
6117 };
6118 
6119 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6120 
6121   // The current assumed state used to determine a change.
6122   auto AssumedState = getAssumed();
6123 
6124   auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has its own memory behavior state, use it to restrict
6126     // the local state. No further analysis is required as the other memory
6127     // state is as optimistic as it gets.
6128     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6129       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6130           *this, IRPosition::callsite_function(*CB));
6131       intersectAssumedBits(MemBehaviorAA.getAssumed());
6132       return !isAtFixpoint();
6133     }
6134 
6135     // Remove access kind modifiers if necessary.
6136     if (I.mayReadFromMemory())
6137       removeAssumedBits(NO_READS);
6138     if (I.mayWriteToMemory())
6139       removeAssumedBits(NO_WRITES);
6140     return !isAtFixpoint();
6141   };
6142 
6143   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6144     return indicatePessimisticFixpoint();
6145 
6146   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6147                                         : ChangeStatus::UNCHANGED;
6148 }
6149 
6150 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6151 
6152   const IRPosition &IRP = getIRPosition();
6153   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6154   AAMemoryBehavior::StateType &S = getState();
6155 
6156   // First, check the function scope. We take the known information and we avoid
6157   // work if the assumed information implies the current assumed information for
  // this attribute. This is valid for all but byval arguments.
6159   Argument *Arg = IRP.getAssociatedArgument();
6160   AAMemoryBehavior::base_t FnMemAssumedState =
6161       AAMemoryBehavior::StateType::getWorstState();
6162   if (!Arg || !Arg->hasByValAttr()) {
6163     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
6164         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6165     FnMemAssumedState = FnMemAA.getAssumed();
6166     S.addKnownBits(FnMemAA.getKnown());
6167     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6168       return ChangeStatus::UNCHANGED;
6169   }
6170 
  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check
  // the potential aliases introduced by the capture. However, there is no
  // need to fall back to anything less optimistic than the function state.
6175   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6176       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6177   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6178     S.intersectAssumedBits(FnMemAssumedState);
6179     return ChangeStatus::CHANGED;
6180   }
6181 
6182   // The current assumed state used to determine a change.
6183   auto AssumedState = S.getAssumed();
6184 
6185   // Liveness information to exclude dead users.
6186   // TODO: Take the FnPos once we have call site specific liveness information.
6187   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6188       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6189       /* TrackDependence */ false);
6190 
6191   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6192   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6193     const Use *U = Uses[i];
6194     Instruction *UserI = cast<Instruction>(U->getUser());
6195     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6196                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6197                       << "]\n");
6198     if (A.isAssumedDead(*U, this, &LivenessAA))
6199       continue;
6200 
6201     // Droppable users, e.g., llvm::assume does not actually perform any action.
6202     if (UserI->isDroppable())
6203       continue;
6204 
6205     // Check if the users of UserI should also be visited.
6206     if (followUsersOfUseIn(A, U, UserI))
6207       addUsesOf(A, *UserI);
6208 
6209     // If UserI might touch memory we analyze the use in detail.
6210     if (UserI->mayReadOrWriteMemory())
6211       analyzeUseIn(A, U, UserI);
6212   }
6213 
6214   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6215                                         : ChangeStatus::UNCHANGED;
6216 }
6217 
6218 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6219   SmallVector<const Use *, 8> WL;
6220   for (const Use &U : V.uses())
6221     WL.push_back(&U);
6222 
6223   while (!WL.empty()) {
6224     const Use *U = WL.pop_back_val();
6225     if (!Visited.insert(U).second)
6226       continue;
6227 
6228     const Instruction *UserI = cast<Instruction>(U->getUser());
6229     if (UserI->mayReadOrWriteMemory()) {
6230       Uses.push_back(U);
6231       continue;
6232     }
6233     if (!followUsersOfUseIn(A, U, UserI))
6234       continue;
6235     for (const Use &UU : UserI->uses())
6236       WL.push_back(&UU);
6237   }
6238 }
6239 
6240 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6241                                                   const Instruction *UserI) {
6242   // The loaded value is unrelated to the pointer argument, no need to
6243   // follow the users of the load.
6244   if (isa<LoadInst>(UserI))
6245     return false;
6246 
6247   // By default we follow all uses assuming UserI might leak information on U,
6248   // we have special handling for call sites operands though.
6249   const auto *CB = dyn_cast<CallBase>(UserI);
6250   if (!CB || !CB->isArgOperand(U))
6251     return true;
6252 
6253   // If the use is a call argument known not to be captured, the users of
6254   // the call do not need to be visited because they have to be unrelated to
6255   // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might pass the argument "through return", which we allow and for
  // which we need to check call users.
6259   if (U->get()->getType()->isPointerTy()) {
6260     unsigned ArgNo = CB->getArgOperandNo(U);
6261     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6262         *this, IRPosition::callsite_argument(*CB, ArgNo),
6263         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6264     return !ArgNoCaptureAA.isAssumedNoCapture();
6265   }
6266 
6267   return true;
6268 }
6269 
6270 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6271                                             const Instruction *UserI) {
6272   assert(UserI->mayReadOrWriteMemory());
6273 
6274   switch (UserI->getOpcode()) {
6275   default:
6276     // TODO: Handle all atomics and other side-effect operations we know of.
6277     break;
6278   case Instruction::Load:
6279     // Loads cause the NO_READS property to disappear.
6280     removeAssumedBits(NO_READS);
6281     return;
6282 
6283   case Instruction::Store:
6284     // Stores cause the NO_WRITES property to disappear if the use is the
6285     // pointer operand. Note that we do assume that capturing was taken care of
6286     // somewhere else.
6287     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6288       removeAssumedBits(NO_WRITES);
6289     return;
6290 
6291   case Instruction::Call:
6292   case Instruction::CallBr:
6293   case Instruction::Invoke: {
6294     // For call sites we look at the argument memory behavior attribute (this
6295     // could be recursive!) in order to restrict our own state.
6296     const auto *CB = cast<CallBase>(UserI);
6297 
6298     // Give up on operand bundles.
6299     if (CB->isBundleOperand(U)) {
6300       indicatePessimisticFixpoint();
6301       return;
6302     }
6303 
    // Calling a function does read the function pointer, and may write it if
    // the function is self-modifying.
6306     if (CB->isCallee(U)) {
6307       removeAssumedBits(NO_READS);
6308       break;
6309     }
6310 
6311     // Adjust the possible access behavior based on the information on the
6312     // argument.
6313     IRPosition Pos;
6314     if (U->get()->getType()->isPointerTy())
6315       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6316     else
6317       Pos = IRPosition::callsite_function(*CB);
6318     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6319         *this, Pos,
6320         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6321     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6322     // and at least "known".
6323     intersectAssumedBits(MemBehaviorAA.getAssumed());
6324     return;
6325   }
6326   };
6327 
6328   // Generally, look at the "may-properties" and adjust the assumed state if we
6329   // did not trigger special handling before.
6330   if (UserI->mayReadFromMemory())
6331     removeAssumedBits(NO_READS);
6332   if (UserI->mayWriteToMemory())
6333     removeAssumedBits(NO_WRITES);
6334 }
6335 
6336 } // namespace
6337 
6338 /// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly, and
/// inaccessiblemem_or_argmemonly.
6341 /// ----------------------------------------------------------------------------
6342 
6343 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6344     AAMemoryLocation::MemoryLocationsKind MLK) {
6345   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6346     return "all memory";
6347   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6348     return "no memory";
6349   std::string S = "memory:";
6350   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6351     S += "stack,";
6352   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6353     S += "constant,";
6354   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6355     S += "internal global,";
6356   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6357     S += "external global,";
6358   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6359     S += "argument,";
6360   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6361     S += "inaccessible,";
6362   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6363     S += "malloced,";
6364   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6365     S += "unknown,";
6366   S.pop_back();
6367   return S;
6368 }
6369 
6370 namespace {
6371 struct AAMemoryLocationImpl : public AAMemoryLocation {
6372 
6373   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6374       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6375     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6376       AccessKind2Accesses[u] = nullptr;
6377   }
6378 
6379   ~AAMemoryLocationImpl() {
6380     // The AccessSets are allocated via a BumpPtrAllocator, we call
6381     // the destructor manually.
6382     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6383       if (AccessKind2Accesses[u])
6384         AccessKind2Accesses[u]->~AccessSet();
6385   }
6386 
6387   /// See AbstractAttribute::initialize(...).
6388   void initialize(Attributor &A) override {
6389     intersectAssumedBits(BEST_STATE);
6390     getKnownStateFromValue(A, getIRPosition(), getState());
6391     AAMemoryLocation::initialize(A);
6392   }
6393 
6394   /// Return the memory behavior information encoded in the IR for \p IRP.
6395   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6396                                      BitIntegerState &State,
6397                                      bool IgnoreSubsumingPositions = false) {
6398     // For internal functions we ignore `argmemonly` and
    // `inaccessiblemem_or_argmemonly` as we might break it via interprocedural
6400     // constant propagation. It is unclear if this is the best way but it is
6401     // unlikely this will cause real performance problems. If we are deriving
6402     // attributes for the anchor function we even remove the attribute in
6403     // addition to ignoring it.
6404     bool UseArgMemOnly = true;
6405     Function *AnchorFn = IRP.getAnchorScope();
6406     if (AnchorFn && A.isRunOn(*AnchorFn))
6407       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6408 
6409     SmallVector<Attribute, 2> Attrs;
6410     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6411     for (const Attribute &Attr : Attrs) {
6412       switch (Attr.getKindAsEnum()) {
6413       case Attribute::ReadNone:
6414         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6415         break;
6416       case Attribute::InaccessibleMemOnly:
6417         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6418         break;
6419       case Attribute::ArgMemOnly:
6420         if (UseArgMemOnly)
6421           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6422         else
6423           IRP.removeAttrs({Attribute::ArgMemOnly});
6424         break;
6425       case Attribute::InaccessibleMemOrArgMemOnly:
6426         if (UseArgMemOnly)
6427           State.addKnownBits(inverseLocation(
6428               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6429         else
6430           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6431         break;
6432       default:
6433         llvm_unreachable("Unexpected attribute!");
6434       }
6435     }
6436   }
6437 
6438   /// See AbstractAttribute::getDeducedAttributes(...).
6439   void getDeducedAttributes(LLVMContext &Ctx,
6440                             SmallVectorImpl<Attribute> &Attrs) const override {
6441     assert(Attrs.size() == 0);
6442     if (isAssumedReadNone()) {
6443       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6444     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6445       if (isAssumedInaccessibleMemOnly())
6446         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6447       else if (isAssumedArgMemOnly())
6448         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6449       else if (isAssumedInaccessibleOrArgMemOnly())
6450         Attrs.push_back(
6451             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6452     }
6453     assert(Attrs.size() <= 1);
6454   }
6455 
6456   /// See AbstractAttribute::manifest(...).
6457   ChangeStatus manifest(Attributor &A) override {
6458     const IRPosition &IRP = getIRPosition();
6459 
6460     // Check if we would improve the existing attributes first.
6461     SmallVector<Attribute, 4> DeducedAttrs;
6462     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6463     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6464           return IRP.hasAttr(Attr.getKindAsEnum(),
6465                              /* IgnoreSubsumingPositions */ true);
6466         }))
6467       return ChangeStatus::UNCHANGED;
6468 
6469     // Clear existing attributes.
6470     IRP.removeAttrs(AttrKinds);
6471     if (isAssumedReadNone())
6472       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6473 
6474     // Use the generic manifest method.
6475     return IRAttribute::manifest(A);
6476   }
6477 
6478   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6479   bool checkForAllAccessesToMemoryKind(
6480       function_ref<bool(const Instruction *, const Value *, AccessKind,
6481                         MemoryLocationsKind)>
6482           Pred,
6483       MemoryLocationsKind RequestedMLK) const override {
6484     if (!isValidState())
6485       return false;
6486 
6487     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6488     if (AssumedMLK == NO_LOCATIONS)
6489       return true;
6490 
6491     unsigned Idx = 0;
6492     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6493          CurMLK *= 2, ++Idx) {
6494       if (CurMLK & RequestedMLK)
6495         continue;
6496 
6497       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6498         for (const AccessInfo &AI : *Accesses)
6499           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6500             return false;
6501     }
6502 
6503     return true;
6504   }
6505 
6506   ChangeStatus indicatePessimisticFixpoint() override {
6507     // If we give up and indicate a pessimistic fixpoint this instruction will
6508     // become an access for all potential access kinds:
6509     // TODO: Add pointers for argmemonly and globals to improve the results of
6510     //       checkForAllAccessesToMemoryKind.
6511     bool Changed = false;
6512     MemoryLocationsKind KnownMLK = getKnown();
6513     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6514     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6515       if (!(CurMLK & KnownMLK))
6516         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6517                                   getAccessKindFromInst(I));
6518     return AAMemoryLocation::indicatePessimisticFixpoint();
6519   }
6520 
6521 protected:
6522   /// Helper struct to tie together an instruction that has a read or write
6523   /// effect with the pointer it accesses (if any).
6524   struct AccessInfo {
6525 
6526     /// The instruction that caused the access.
6527     const Instruction *I;
6528 
6529     /// The base pointer that is accessed, or null if unknown.
6530     const Value *Ptr;
6531 
6532     /// The kind of access (read/write/read+write).
6533     AccessKind Kind;
6534 
6535     bool operator==(const AccessInfo &RHS) const {
6536       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6537     }
6538     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6539       if (LHS.I != RHS.I)
6540         return LHS.I < RHS.I;
6541       if (LHS.Ptr != RHS.Ptr)
6542         return LHS.Ptr < RHS.Ptr;
6543       if (LHS.Kind != RHS.Kind)
6544         return LHS.Kind < RHS.Kind;
6545       return false;
6546     }
6547   };
6548 
6549   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6550   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6551   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6552   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6553 
6554   /// Categorize the pointer arguments of CB that might access memory in
6555   /// AccessedLoc and update the state and access map accordingly.
6556   void
6557   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6558                                      AAMemoryLocation::StateType &AccessedLocs,
6559                                      bool &Changed);
6560 
6561   /// Return the kind(s) of location that may be accessed by \p V.
6562   AAMemoryLocation::MemoryLocationsKind
6563   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6564 
6565   /// Return the access kind as determined by \p I.
6566   AccessKind getAccessKindFromInst(const Instruction *I) {
6567     AccessKind AK = READ_WRITE;
6568     if (I) {
6569       AK = I->mayReadFromMemory() ? READ : NONE;
6570       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6571     }
6572     return AK;
6573   }
6574 
6575   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6576   /// an access of kind \p AK to a \p MLK memory location with the access
6577   /// pointer \p Ptr.
6578   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6579                                  MemoryLocationsKind MLK, const Instruction *I,
6580                                  const Value *Ptr, bool &Changed,
6581                                  AccessKind AK = READ_WRITE) {
6582 
6583     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6584     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6585     if (!Accesses)
6586       Accesses = new (Allocator) AccessSet();
6587     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6588     State.removeAssumedBits(MLK);
6589   }
6590 
6591   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6592   /// arguments, and update the state and access map accordingly.
6593   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6594                           AAMemoryLocation::StateType &State, bool &Changed);
6595 
6596   /// Used to allocate access sets.
6597   BumpPtrAllocator &Allocator;
6598 
6599   /// The set of IR attributes AAMemoryLocation deals with.
6600   static const Attribute::AttrKind AttrKinds[4];
6601 };
6602 
6603 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6604     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6605     Attribute::InaccessibleMemOrArgMemOnly};
6606 
6607 void AAMemoryLocationImpl::categorizePtrValue(
6608     Attributor &A, const Instruction &I, const Value &Ptr,
6609     AAMemoryLocation::StateType &State, bool &Changed) {
6610   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6611                     << Ptr << " ["
6612                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6613 
6614   auto StripGEPCB = [](Value *V) -> Value * {
6615     auto *GEP = dyn_cast<GEPOperator>(V);
6616     while (GEP) {
6617       V = GEP->getPointerOperand();
6618       GEP = dyn_cast<GEPOperator>(V);
6619     }
6620     return V;
6621   };
6622 
6623   auto VisitValueCB = [&](Value &V, const Instruction *,
6624                           AAMemoryLocation::StateType &T,
6625                           bool Stripped) -> bool {
6626     // TODO: recognize the TBAA used for constant accesses.
6627     MemoryLocationsKind MLK = NO_LOCATIONS;
6628     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6629     if (isa<UndefValue>(V))
6630       return true;
6631     if (auto *Arg = dyn_cast<Argument>(&V)) {
6632       if (Arg->hasByValAttr())
6633         MLK = NO_LOCAL_MEM;
6634       else
6635         MLK = NO_ARGUMENT_MEM;
6636     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attr pass, so we do not treat it as one either. Constants
      // defined by TBAA are similar. (We know we do not write it because it
      // is constant.)
6640       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6641         if (GVar->isConstant())
6642           return true;
6643 
6644       if (GV->hasLocalLinkage())
6645         MLK = NO_GLOBAL_INTERNAL_MEM;
6646       else
6647         MLK = NO_GLOBAL_EXTERNAL_MEM;
6648     } else if (isa<ConstantPointerNull>(V) &&
6649                !NullPointerIsDefined(getAssociatedFunction(),
6650                                      V.getType()->getPointerAddressSpace())) {
6651       return true;
6652     } else if (isa<AllocaInst>(V)) {
6653       MLK = NO_LOCAL_MEM;
6654     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6655       const auto &NoAliasAA =
6656           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6657       if (NoAliasAA.isAssumedNoAlias())
6658         MLK = NO_MALLOCED_MEM;
6659       else
6660         MLK = NO_UNKOWN_MEM;
6661     } else {
6662       MLK = NO_UNKOWN_MEM;
6663     }
6664 
6665     assert(MLK != NO_LOCATIONS && "No location specified!");
6666     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6667                               getAccessKindFromInst(&I));
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value categorized: " << V
                      << " -> " << getMemoryLocationsAsStr(T.getAssumed())
                      << "\n");
6671     return true;
6672   };
6673 
6674   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6675           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6676           /* UseValueSimplify */ true,
6677           /* MaxValues */ 32, StripGEPCB)) {
6678     LLVM_DEBUG(
6679         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6680     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6681                               getAccessKindFromInst(&I));
6682   } else {
6683     LLVM_DEBUG(
6684         dbgs()
6685         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6686         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6687   }
6688 }
6689 
6690 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6691     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6692     bool &Changed) {
6693   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6694 
6695     // Skip non-pointer arguments.
6696     const Value *ArgOp = CB.getArgOperand(ArgNo);
6697     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6698       continue;
6699 
6700     // Skip readnone arguments.
6701     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6702     const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6703         *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6704 
6705     if (ArgOpMemLocationAA.isAssumedReadNone())
6706       continue;
6707 
6708     // Categorize potentially accessed pointer arguments as if there was an
6709     // access instruction with them as pointer.
6710     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6711   }
6712 }
6713 
6714 AAMemoryLocation::MemoryLocationsKind
6715 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6716                                                   bool &Changed) {
6717   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6718                     << I << "\n");
6719 
6720   AAMemoryLocation::StateType AccessedLocs;
6721   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6722 
6723   if (auto *CB = dyn_cast<CallBase>(&I)) {
6724 
    // First check if we assume any accessed memory is visible.
6726     const auto &CBMemLocationAA =
6727         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6728     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6729                       << " [" << CBMemLocationAA << "]\n");
6730 
6731     if (CBMemLocationAA.isAssumedReadNone())
6732       return NO_LOCATIONS;
6733 
6734     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6735       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6736                                 Changed, getAccessKindFromInst(&I));
6737       return AccessedLocs.getAssumed();
6738     }
6739 
6740     uint32_t CBAssumedNotAccessedLocs =
6741         CBMemLocationAA.getAssumedNotAccessedLocation();
6742 
    // Set the argmemonly and global bits as we handle them separately below.
6744     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6745         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6746 
6747     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6748       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6749         continue;
6750       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6751                                 getAccessKindFromInst(&I));
6752     }
6753 
6754     // Now handle global memory if it might be accessed. This is slightly tricky
6755     // as NO_GLOBAL_MEM has multiple bits set.
6756     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6757     if (HasGlobalAccesses) {
6758       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6759                             AccessKind Kind, MemoryLocationsKind MLK) {
6760         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6761                                   getAccessKindFromInst(&I));
6762         return true;
6763       };
6764       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6765               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6766         return AccessedLocs.getWorstState();
6767     }
6768 
6769     LLVM_DEBUG(
6770         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6771                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6772 
6773     // Now handle argument memory if it might be accessed.
6774     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6775     if (HasArgAccesses)
6776       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6777 
6778     LLVM_DEBUG(
6779         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6780                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6781 
6782     return AccessedLocs.getAssumed();
6783   }
6784 
6785   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6786     LLVM_DEBUG(
6787         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6788                << I << " [" << *Ptr << "]\n");
6789     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6790     return AccessedLocs.getAssumed();
6791   }
6792 
6793   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6794                     << I << "\n");
6795   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6796                             getAccessKindFromInst(&I));
6797   return AccessedLocs.getAssumed();
6798 }
6799 
6800 /// An AA to represent the memory behavior function attributes.
6801 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6802   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6803       : AAMemoryLocationImpl(IRP, A) {}
6804 
6805   /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
6807 
6808     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6809         *this, getIRPosition(), /* TrackDependence */ false);
6810     if (MemBehaviorAA.isAssumedReadNone()) {
6811       if (MemBehaviorAA.isKnownReadNone())
6812         return indicateOptimisticFixpoint();
6813       assert(isAssumedReadNone() &&
6814              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6815       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6816       return ChangeStatus::UNCHANGED;
6817     }
6818 
6819     // The current assumed state used to determine a change.
6820     auto AssumedState = getAssumed();
6821     bool Changed = false;
6822 
6823     auto CheckRWInst = [&](Instruction &I) {
6824       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6825       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6826                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6827       removeAssumedBits(inverseLocation(MLK, false, false));
      // Stop once only the valid bit is set in the *not assumed location*,
      // i.e., once we do not actually exclude any memory locations in the
      // state anymore.
6830       return getAssumedNotAccessedLocation() != VALID_STATE;
6831     };
6832 
6833     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6834       return indicatePessimisticFixpoint();
6835 
6836     Changed |= AssumedState != getAssumed();
6837     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6838   }
6839 
6840   /// See AbstractAttribute::trackStatistics()
6841   void trackStatistics() const override {
6842     if (isAssumedReadNone())
6843       STATS_DECLTRACK_FN_ATTR(readnone)
6844     else if (isAssumedArgMemOnly())
6845       STATS_DECLTRACK_FN_ATTR(argmemonly)
6846     else if (isAssumedInaccessibleMemOnly())
6847       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6848     else if (isAssumedInaccessibleOrArgMemOnly())
6849       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6850   }
6851 };
6852 
6853 /// AAMemoryLocation attribute for call sites.
6854 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6855   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6856       : AAMemoryLocationImpl(IRP, A) {}
6857 
6858   /// See AbstractAttribute::initialize(...).
6859   void initialize(Attributor &A) override {
6860     AAMemoryLocationImpl::initialize(A);
6861     Function *F = getAssociatedFunction();
6862     if (!F || F->isDeclaration())
6863       indicatePessimisticFixpoint();
6864   }
6865 
6866   /// See AbstractAttribute::updateImpl(...).
6867   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense
    //       to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6872     Function *F = getAssociatedFunction();
6873     const IRPosition &FnPos = IRPosition::function(*F);
6874     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6875     bool Changed = false;
6876     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6877                           AccessKind Kind, MemoryLocationsKind MLK) {
6878       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6879                                 getAccessKindFromInst(I));
6880       return true;
6881     };
6882     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6883       return indicatePessimisticFixpoint();
6884     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6885   }
6886 
6887   /// See AbstractAttribute::trackStatistics()
6888   void trackStatistics() const override {
6889     if (isAssumedReadNone())
6890       STATS_DECLTRACK_CS_ATTR(readnone)
6891   }
6892 };
6893 
6894 /// ------------------ Value Constant Range Attribute -------------------------
6895 
6896 struct AAValueConstantRangeImpl : AAValueConstantRange {
6897   using StateType = IntegerRangeState;
6898   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6899       : AAValueConstantRange(IRP, A) {}
6900 
6901   /// See AbstractAttribute::getAsStr().
6902   const std::string getAsStr() const override {
6903     std::string Str;
6904     llvm::raw_string_ostream OS(Str);
6905     OS << "range(" << getBitWidth() << ")<";
6906     getKnown().print(OS);
6907     OS << " / ";
6908     getAssumed().print(OS);
6909     OS << ">";
6910     return OS.str();
6911   }
6912 
6913   /// Helper function to get a SCEV expr for the associated value at program
6914   /// point \p I.
6915   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6916     if (!getAnchorScope())
6917       return nullptr;
6918 
6919     ScalarEvolution *SE =
6920         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6921             *getAnchorScope());
6922 
6923     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6924         *getAnchorScope());
6925 
6926     if (!SE || !LI)
6927       return nullptr;
6928 
6929     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6930     if (!I)
6931       return S;
6932 
6933     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6934   }
6935 
6936   /// Helper function to get a range from SCEV for the associated value at
6937   /// program point \p I.
6938   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6939                                          const Instruction *I = nullptr) const {
6940     if (!getAnchorScope())
6941       return getWorstState(getBitWidth());
6942 
6943     ScalarEvolution *SE =
6944         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6945             *getAnchorScope());
6946 
6947     const SCEV *S = getSCEV(A, I);
6948     if (!SE || !S)
6949       return getWorstState(getBitWidth());
6950 
6951     return SE->getUnsignedRange(S);
6952   }
6953 
6954   /// Helper function to get a range from LVI for the associated value at
6955   /// program point \p I.
6956   ConstantRange
6957   getConstantRangeFromLVI(Attributor &A,
6958                           const Instruction *CtxI = nullptr) const {
6959     if (!getAnchorScope())
6960       return getWorstState(getBitWidth());
6961 
6962     LazyValueInfo *LVI =
6963         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6964             *getAnchorScope());
6965 
6966     if (!LVI || !CtxI)
6967       return getWorstState(getBitWidth());
6968     return LVI->getConstantRange(&getAssociatedValue(),
6969                                  const_cast<Instruction *>(CtxI));
6970   }
6971 
6972   /// See AAValueConstantRange::getKnownConstantRange(..).
6973   ConstantRange
6974   getKnownConstantRange(Attributor &A,
6975                         const Instruction *CtxI = nullptr) const override {
6976     if (!CtxI || CtxI == getCtxI())
6977       return getKnown();
6978 
6979     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6980     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6981     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6982   }
6983 
6984   /// See AAValueConstantRange::getAssumedConstantRange(..).
6985   ConstantRange
6986   getAssumedConstantRange(Attributor &A,
6987                           const Instruction *CtxI = nullptr) const override {
6988     // TODO: Make SCEV use Attributor assumption.
6989     //       We may be able to bound a variable range via assumptions in
6990     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6991     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6992 
6993     if (!CtxI || CtxI == getCtxI())
6994       return getAssumed();
6995 
6996     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6997     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6998     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6999   }
7000 
7001   /// See AbstractAttribute::initialize(..).
7002   void initialize(Attributor &A) override {
7003     // Intersect a range given by SCEV.
7004     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7005 
7006     // Intersect a range given by LVI.
7007     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7008   }
7009 
7010   /// Helper function to create MDNode for range metadata.
7011   static MDNode *
7012   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7013                             const ConstantRange &AssumedConstantRange) {
7014     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7015                                   Ty, AssumedConstantRange.getLower())),
7016                               ConstantAsMetadata::get(ConstantInt::get(
7017                                   Ty, AssumedConstantRange.getUpper()))};
7018     return MDNode::get(Ctx, LowAndHigh);
7019   }
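
  // For illustration: an assumed range [0, 8) over i32 would be encoded as
  //   !range !{i32 0, i32 8}
  // i.e., a half-open [Lower, Upper) interval, matching the LangRef semantics
  // for range metadata.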
7020 
7021   /// Return true if \p Assumed is included in \p KnownRanges.
7022   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7023 
7024     if (Assumed.isFullSet())
7025       return false;
7026 
7027     if (!KnownRanges)
7028       return true;
7029 
    // If multiple ranges are annotated in IR, we give up annotating the
    // assumed range for now.

    // TODO: If there exists a known range which contains the assumed range,
    //       we can say the assumed range is better.
7035     if (KnownRanges->getNumOperands() > 2)
7036       return false;
7037 
7038     ConstantInt *Lower =
7039         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7040     ConstantInt *Upper =
7041         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7042 
7043     ConstantRange Known(Lower->getValue(), Upper->getValue());
7044     return Known.contains(Assumed) && Known != Assumed;
7045   }
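
  // Illustrative example: with existing metadata !range !{i32 0, i32 10},
  // i.e., a known range [0, 10), an assumed range such as [2, 5) is strictly
  // contained and thus considered better, whereas a full-set assumed range or
  // one equal to the known range is not.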
7046 
7047   /// Helper function to set range metadata.
7048   static bool
7049   setRangeMetadataIfisBetterRange(Instruction *I,
7050                                   const ConstantRange &AssumedConstantRange) {
7051     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7052     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7053       if (!AssumedConstantRange.isEmptySet()) {
7054         I->setMetadata(LLVMContext::MD_range,
7055                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7056                                                  AssumedConstantRange));
7057         return true;
7058       }
7059     }
7060     return false;
7061   }
7062 
7063   /// See AbstractAttribute::manifest()
7064   ChangeStatus manifest(Attributor &A) override {
7065     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7066     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7067     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7068 
7069     auto &V = getAssociatedValue();
7070     if (!AssumedConstantRange.isEmptySet() &&
7071         !AssumedConstantRange.isSingleElement()) {
7072       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7073         assert(I == getCtxI() && "Should not annotate an instruction which is "
7074                                  "not the context instruction");
7075         if (isa<CallInst>(I) || isa<LoadInst>(I))
7076           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7077             Changed = ChangeStatus::CHANGED;
7078       }
7079     }
7080 
7081     return Changed;
7082   }
7083 };
7084 
7085 struct AAValueConstantRangeArgument final
7086     : AAArgumentFromCallSiteArguments<
7087           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
7088   using Base = AAArgumentFromCallSiteArguments<
7089       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
7090   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7091       : Base(IRP, A) {}
7092 
7093   /// See AbstractAttribute::initialize(..).
7094   void initialize(Attributor &A) override {
7095     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7096       indicatePessimisticFixpoint();
7097     } else {
7098       Base::initialize(A);
7099     }
7100   }
7101 
7102   /// See AbstractAttribute::trackStatistics()
7103   void trackStatistics() const override {
7104     STATS_DECLTRACK_ARG_ATTR(value_range)
7105   }
7106 };
7107 
7108 struct AAValueConstantRangeReturned
7109     : AAReturnedFromReturnedValues<AAValueConstantRange,
7110                                    AAValueConstantRangeImpl> {
7111   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
7112                                             AAValueConstantRangeImpl>;
7113   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7114       : Base(IRP, A) {}
7115 
7116   /// See AbstractAttribute::initialize(...).
7117   void initialize(Attributor &A) override {}
7118 
7119   /// See AbstractAttribute::trackStatistics()
7120   void trackStatistics() const override {
7121     STATS_DECLTRACK_FNRET_ATTR(value_range)
7122   }
7123 };
7124 
7125 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7126   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7127       : AAValueConstantRangeImpl(IRP, A) {}
7128 
7129   /// See AbstractAttribute::initialize(...).
7130   void initialize(Attributor &A) override {
7131     AAValueConstantRangeImpl::initialize(A);
7132     Value &V = getAssociatedValue();
7133 
7134     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7135       unionAssumed(ConstantRange(C->getValue()));
7136       indicateOptimisticFixpoint();
7137       return;
7138     }
7139 
7140     if (isa<UndefValue>(&V)) {
7141       // Collapse the undef state to 0.
7142       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7143       indicateOptimisticFixpoint();
7144       return;
7145     }
7146 
7147     if (isa<CallBase>(&V))
7148       return;
7149 
7150     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7151       return;
7152     // If it is a load instruction with range metadata, use it.
7153     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7154       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7155         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7156         return;
7157       }
7158 
    // We can work with PHI and select instructions as we traverse their
    // operands during the update.
7161     if (isa<SelectInst>(V) || isa<PHINode>(V))
7162       return;
7163 
7164     // Otherwise we give up.
7165     indicatePessimisticFixpoint();
7166 
7167     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7168                       << getAssociatedValue() << "\n");
7169   }
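
  // Rough sketch of the deduction below (illustrative numbers): for
  //   %add = add i32 %x, %y
  // with assumed ranges [1, 3) for %x and [2, 4) for %y, the ConstantRange
  // binary operation yields [3, 6), which is unioned into the state T.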
7170 
7171   bool calculateBinaryOperator(
7172       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7173       const Instruction *CtxI,
7174       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7175     Value *LHS = BinOp->getOperand(0);
7176     Value *RHS = BinOp->getOperand(1);
7177     // TODO: Allow non integers as well.
7178     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7179       return false;
7180 
7181     auto &LHSAA =
7182         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
7183     QuerriedAAs.push_back(&LHSAA);
7184     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7185 
7186     auto &RHSAA =
7187         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
7188     QuerriedAAs.push_back(&RHSAA);
7189     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7190 
7191     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7192 
7193     T.unionAssumed(AssumedRange);
7194 
7195     // TODO: Track a known state too.
7196 
7197     return T.isValidState();
7198   }
7199 
7200   bool calculateCastInst(
7201       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7202       const Instruction *CtxI,
7203       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7204     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7205     // TODO: Allow non integers as well.
7206     Value &OpV = *CastI->getOperand(0);
7207     if (!OpV.getType()->isIntegerTy())
7208       return false;
7209 
7210     auto &OpAA =
7211         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
7212     QuerriedAAs.push_back(&OpAA);
7213     T.unionAssumed(
7214         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7215     return T.isValidState();
7216   }
7217 
7218   bool
7219   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7220                    const Instruction *CtxI,
7221                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7222     Value *LHS = CmpI->getOperand(0);
7223     Value *RHS = CmpI->getOperand(1);
7224     // TODO: Allow non integers as well.
7225     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7226       return false;
7227 
7228     auto &LHSAA =
7229         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
7230     QuerriedAAs.push_back(&LHSAA);
7231     auto &RHSAA =
7232         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
7233     QuerriedAAs.push_back(&RHSAA);
7234 
7235     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7236     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7237 
7238     // If one of them is empty set, we can't decide.
7239     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7240       return true;
7241 
7242     bool MustTrue = false, MustFalse = false;
7243 
7244     auto AllowedRegion =
7245         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7246 
7247     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
7248         CmpI->getPredicate(), RHSAARange);
7249 
7250     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7251       MustFalse = true;
7252 
7253     if (SatisfyingRegion.contains(LHSAARange))
7254       MustTrue = true;
7255 
7256     assert((!MustTrue || !MustFalse) &&
7257            "Either MustTrue or MustFalse should be false!");
7258 
7259     if (MustTrue)
7260       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7261     else if (MustFalse)
7262       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7263     else
7264       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7265 
7266     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7267                       << " " << RHSAA << "\n");
7268 
7269     // TODO: Track a known state too.
7270     return T.isValidState();
7271   }
7272 
7273   /// See AbstractAttribute::updateImpl(...).
7274   ChangeStatus updateImpl(Attributor &A) override {
7275     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7276                             IntegerRangeState &T, bool Stripped) -> bool {
7277       Instruction *I = dyn_cast<Instruction>(&V);
7278       if (!I || isa<CallBase>(I)) {
7279 
        // If the value is not an instruction (or it is a call base), we query
        // the Attributor for the AA of the value itself.
7281         const auto &AA =
7282             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
7283 
        // The clamp operator is not used here so that the range at the
        // program point CtxI can be utilized.
7285         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7286 
7287         return T.isValidState();
7288       }
7289 
7290       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7291       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7292         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7293           return false;
7294       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7295         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7296           return false;
7297       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7298         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7299           return false;
7300       } else {
7301         // Give up with other instructions.
7302         // TODO: Add other instructions
7303 
7304         T.indicatePessimisticFixpoint();
7305         return false;
7306       }
7307 
7308       // Catch circular reasoning in a pessimistic way for now.
7309       // TODO: Check how the range evolves and if we stripped anything, see also
7310       //       AADereferenceable or AAAlign for similar situations.
7311       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7312         if (QueriedAA != this)
7313           continue;
        // If we are in a steady state we do not need to worry.
7315         if (T.getAssumed() == getState().getAssumed())
7316           continue;
7317         T.indicatePessimisticFixpoint();
7318       }
7319 
7320       return T.isValidState();
7321     };
7322 
7323     IntegerRangeState T(getBitWidth());
7324 
7325     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7326             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7327             /* UseValueSimplify */ false))
7328       return indicatePessimisticFixpoint();
7329 
7330     return clampStateAndIndicateChange(getState(), T);
7331   }
7332 
7333   /// See AbstractAttribute::trackStatistics()
7334   void trackStatistics() const override {
7335     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7336   }
7337 };
7338 
7339 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7340   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7341       : AAValueConstantRangeImpl(IRP, A) {}
7342 
  /// See AbstractAttribute::updateImpl(...).
7344   ChangeStatus updateImpl(Attributor &A) override {
7345     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7346                      "not be called");
7347   }
7348 
7349   /// See AbstractAttribute::trackStatistics()
7350   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7351 };
7352 
7353 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7354   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7355       : AAValueConstantRangeFunction(IRP, A) {}
7356 
7357   /// See AbstractAttribute::trackStatistics()
7358   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7359 };
7360 
7361 struct AAValueConstantRangeCallSiteReturned
7362     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7363                                      AAValueConstantRangeImpl> {
7364   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7365       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7366                                        AAValueConstantRangeImpl>(IRP, A) {}
7367 
7368   /// See AbstractAttribute::initialize(...).
7369   void initialize(Attributor &A) override {
    // If it is a call instruction with range metadata, use the metadata.
7371     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7372       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7373         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7374 
7375     AAValueConstantRangeImpl::initialize(A);
7376   }
7377 
7378   /// See AbstractAttribute::trackStatistics()
7379   void trackStatistics() const override {
7380     STATS_DECLTRACK_CSRET_ATTR(value_range)
7381   }
7382 };
7383 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7384   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7385       : AAValueConstantRangeFloating(IRP, A) {}
7386 
7387   /// See AbstractAttribute::manifest()
7388   ChangeStatus manifest(Attributor &A) override {
7389     return ChangeStatus::UNCHANGED;
7390   }
7391 
7392   /// See AbstractAttribute::trackStatistics()
7393   void trackStatistics() const override {
7394     STATS_DECLTRACK_CSARG_ATTR(value_range)
7395   }
7396 };
7397 
7398 /// ------------------ Potential Values Attribute -------------------------
7399 
7400 struct AAPotentialValuesImpl : AAPotentialValues {
7401   using StateType = PotentialConstantIntValuesState;
7402 
7403   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7404       : AAPotentialValues(IRP, A) {}
7405 
7406   /// See AbstractAttribute::getAsStr().
7407   const std::string getAsStr() const override {
7408     std::string Str;
7409     llvm::raw_string_ostream OS(Str);
7410     OS << getState();
7411     return OS.str();
7412   }
7413 
7414   /// See AbstractAttribute::updateImpl(...).
7415   ChangeStatus updateImpl(Attributor &A) override {
7416     return indicatePessimisticFixpoint();
7417   }
7418 };
7419 
7420 struct AAPotentialValuesArgument final
7421     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7422                                       PotentialConstantIntValuesState> {
7423   using Base =
7424       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7425                                       PotentialConstantIntValuesState>;
7426   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7427       : Base(IRP, A) {}
7428 
7429   /// See AbstractAttribute::initialize(..).
7430   void initialize(Attributor &A) override {
7431     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7432       indicatePessimisticFixpoint();
7433     } else {
7434       Base::initialize(A);
7435     }
7436   }
7437 
7438   /// See AbstractAttribute::trackStatistics()
7439   void trackStatistics() const override {
7440     STATS_DECLTRACK_ARG_ATTR(potential_values)
7441   }
7442 };
7443 
7444 struct AAPotentialValuesReturned
7445     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7446   using Base =
7447       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7448   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7449       : Base(IRP, A) {}
7450 
7451   /// See AbstractAttribute::trackStatistics()
7452   void trackStatistics() const override {
7453     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7454   }
7455 };
7456 
7457 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7458   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7459       : AAPotentialValuesImpl(IRP, A) {}
7460 
7461   /// See AbstractAttribute::initialize(..).
7462   void initialize(Attributor &A) override {
7463     Value &V = getAssociatedValue();
7464 
7465     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7466       unionAssumed(C->getValue());
7467       indicateOptimisticFixpoint();
7468       return;
7469     }
7470 
7471     if (isa<UndefValue>(&V)) {
7472       unionAssumedWithUndef();
7473       indicateOptimisticFixpoint();
7474       return;
7475     }
7476 
7477     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7478       return;
7479 
7480     if (isa<SelectInst>(V) || isa<PHINode>(V))
7481       return;
7482 
7483     indicatePessimisticFixpoint();
7484 
7485     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7486                       << getAssociatedValue() << "\n");
7487   }
7488 
7489   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7490                                 const APInt &RHS) {
7491     ICmpInst::Predicate Pred = ICI->getPredicate();
7492     switch (Pred) {
7493     case ICmpInst::ICMP_UGT:
7494       return LHS.ugt(RHS);
7495     case ICmpInst::ICMP_SGT:
7496       return LHS.sgt(RHS);
7497     case ICmpInst::ICMP_EQ:
7498       return LHS.eq(RHS);
7499     case ICmpInst::ICMP_UGE:
7500       return LHS.uge(RHS);
7501     case ICmpInst::ICMP_SGE:
7502       return LHS.sge(RHS);
7503     case ICmpInst::ICMP_ULT:
7504       return LHS.ult(RHS);
7505     case ICmpInst::ICMP_SLT:
7506       return LHS.slt(RHS);
7507     case ICmpInst::ICMP_NE:
7508       return LHS.ne(RHS);
7509     case ICmpInst::ICMP_ULE:
7510       return LHS.ule(RHS);
7511     case ICmpInst::ICMP_SLE:
7512       return LHS.sle(RHS);
7513     default:
7514       llvm_unreachable("Invalid ICmp predicate!");
7515     }
7516   }
7517 
7518   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7519                                  uint32_t ResultBitWidth) {
7520     Instruction::CastOps CastOp = CI->getOpcode();
7521     switch (CastOp) {
7522     default:
7523       llvm_unreachable("unsupported or not integer cast");
7524     case Instruction::Trunc:
7525       return Src.trunc(ResultBitWidth);
7526     case Instruction::SExt:
7527       return Src.sext(ResultBitWidth);
7528     case Instruction::ZExt:
7529       return Src.zext(ResultBitWidth);
7530     case Instruction::BitCast:
7531       return Src;
7532     }
7533   }
7534 
7535   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7536                                        const APInt &LHS, const APInt &RHS,
7537                                        bool &SkipOperation, bool &Unsupported) {
7538     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
    // Unsupported is set to true when the binary operator is not supported.
    // SkipOperation is set to true when UB occurs with the given operand pair
    // (LHS, RHS).
    // TODO: we should look at the nsw and nuw keywords to handle operations
    //       that create poison or undef values.
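    // For example (illustration only): a udiv where the RHS potential value
    // is 0 would be immediate UB, so SkipOperation is set and that operand
    // pair is ignored; a floating-point opcode such as fadd is not handled
    // below and sets Unsupported instead.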
7544     switch (BinOpcode) {
7545     default:
7546       Unsupported = true;
7547       return LHS;
7548     case Instruction::Add:
7549       return LHS + RHS;
7550     case Instruction::Sub:
7551       return LHS - RHS;
7552     case Instruction::Mul:
7553       return LHS * RHS;
7554     case Instruction::UDiv:
7555       if (RHS.isNullValue()) {
7556         SkipOperation = true;
7557         return LHS;
7558       }
7559       return LHS.udiv(RHS);
7560     case Instruction::SDiv:
7561       if (RHS.isNullValue()) {
7562         SkipOperation = true;
7563         return LHS;
7564       }
7565       return LHS.sdiv(RHS);
7566     case Instruction::URem:
7567       if (RHS.isNullValue()) {
7568         SkipOperation = true;
7569         return LHS;
7570       }
7571       return LHS.urem(RHS);
7572     case Instruction::SRem:
7573       if (RHS.isNullValue()) {
7574         SkipOperation = true;
7575         return LHS;
7576       }
7577       return LHS.srem(RHS);
7578     case Instruction::Shl:
7579       return LHS.shl(RHS);
7580     case Instruction::LShr:
7581       return LHS.lshr(RHS);
7582     case Instruction::AShr:
7583       return LHS.ashr(RHS);
7584     case Instruction::And:
7585       return LHS & RHS;
7586     case Instruction::Or:
7587       return LHS | RHS;
7588     case Instruction::Xor:
7589       return LHS ^ RHS;
7590     }
7591   }
7592 
7593   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7594                                            const APInt &LHS, const APInt &RHS) {
7595     bool SkipOperation = false;
7596     bool Unsupported = false;
7597     APInt Result =
7598         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7599     if (Unsupported)
7600       return false;
7601     // If SkipOperation is true, we can ignore this operand pair (L, R).
7602     if (!SkipOperation)
7603       unionAssumed(Result);
7604     return isValidState();
7605   }
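
  // Sketch of the icmp handling below (illustrative values): for
  //   %c = icmp slt i32 %x, %y
  // with assumed potential values {1, 2} for %x and {3} for %y, every pair
  // compares true, so only the i1 value 1 is added to the assumed set; if
  // some pairs compared true and others false, we give up and take the
  // pessimistic fixpoint.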
7606 
7607   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7608     auto AssumedBefore = getAssumed();
7609     Value *LHS = ICI->getOperand(0);
7610     Value *RHS = ICI->getOperand(1);
7611     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7612       return indicatePessimisticFixpoint();
7613 
7614     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7615     if (!LHSAA.isValidState())
7616       return indicatePessimisticFixpoint();
7617 
7618     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7619     if (!RHSAA.isValidState())
7620       return indicatePessimisticFixpoint();
7621 
7622     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7623     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7624 
7625     // TODO: make use of undef flag to limit potential values aggressively.
7626     bool MaybeTrue = false, MaybeFalse = false;
7627     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7628     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7629       // The result of any comparison between undefs can be soundly replaced
7630       // with undef.
7631       unionAssumedWithUndef();
7632     } else if (LHSAA.undefIsContained()) {
7634       for (const APInt &R : RHSAAPVS) {
7635         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7636         MaybeTrue |= CmpResult;
7637         MaybeFalse |= !CmpResult;
7638         if (MaybeTrue & MaybeFalse)
7639           return indicatePessimisticFixpoint();
7640       }
7641     } else if (RHSAA.undefIsContained()) {
7642       for (const APInt &L : LHSAAPVS) {
7643         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7644         MaybeTrue |= CmpResult;
7645         MaybeFalse |= !CmpResult;
7646         if (MaybeTrue & MaybeFalse)
7647           return indicatePessimisticFixpoint();
7648       }
7649     } else {
7650       for (const APInt &L : LHSAAPVS) {
7651         for (const APInt &R : RHSAAPVS) {
7652           bool CmpResult = calculateICmpInst(ICI, L, R);
7653           MaybeTrue |= CmpResult;
7654           MaybeFalse |= !CmpResult;
7655           if (MaybeTrue & MaybeFalse)
7656             return indicatePessimisticFixpoint();
7657         }
7658       }
7659     }
7660     if (MaybeTrue)
7661       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7662     if (MaybeFalse)
7663       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7664     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7665                                          : ChangeStatus::CHANGED;
7666   }
7667 
7668   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7669     auto AssumedBefore = getAssumed();
7670     Value *LHS = SI->getTrueValue();
7671     Value *RHS = SI->getFalseValue();
7672     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7673       return indicatePessimisticFixpoint();
7674 
7675     // TODO: Use assumed simplified condition value
7676     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7677     if (!LHSAA.isValidState())
7678       return indicatePessimisticFixpoint();
7679 
7680     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7681     if (!RHSAA.isValidState())
7682       return indicatePessimisticFixpoint();
7683 
7684     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
      // select i1 *, undef, undef => undef
7686       unionAssumedWithUndef();
7687     else {
7688       unionAssumed(LHSAA);
7689       unionAssumed(RHSAA);
7690     }
7691     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7692                                          : ChangeStatus::CHANGED;
7693   }
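
  // Illustration: for `%t = trunc i32 %x to i8` with assumed potential values
  // {5, 256} for %x, each value is truncated, yielding the assumed set {5, 0}
  // for %t.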
7694 
7695   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7696     auto AssumedBefore = getAssumed();
7697     if (!CI->isIntegerCast())
7698       return indicatePessimisticFixpoint();
7699     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7700     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7701     Value *Src = CI->getOperand(0);
7702     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src));
7703     if (!SrcAA.isValidState())
7704       return indicatePessimisticFixpoint();
7705     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7706     if (SrcAA.undefIsContained())
7707       unionAssumedWithUndef();
7708     else {
7709       for (const APInt &S : SrcAAPVS) {
7710         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7711         unionAssumed(T);
7712       }
7713     }
7714     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7715                                          : ChangeStatus::CHANGED;
7716   }
7717 
7718   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7719     auto AssumedBefore = getAssumed();
7720     Value *LHS = BinOp->getOperand(0);
7721     Value *RHS = BinOp->getOperand(1);
7722     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7723       return indicatePessimisticFixpoint();
7724 
7725     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7726     if (!LHSAA.isValidState())
7727       return indicatePessimisticFixpoint();
7728 
7729     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7730     if (!RHSAA.isValidState())
7731       return indicatePessimisticFixpoint();
7732 
7733     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7734     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7735     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7736 
7737     // TODO: make use of undef flag to limit potential values aggressively.
7738     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7739       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7740         return indicatePessimisticFixpoint();
7741     } else if (LHSAA.undefIsContained()) {
7742       for (const APInt &R : RHSAAPVS) {
7743         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7744           return indicatePessimisticFixpoint();
7745       }
7746     } else if (RHSAA.undefIsContained()) {
7747       for (const APInt &L : LHSAAPVS) {
7748         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7749           return indicatePessimisticFixpoint();
7750       }
7751     } else {
7752       for (const APInt &L : LHSAAPVS) {
7753         for (const APInt &R : RHSAAPVS) {
7754           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7755             return indicatePessimisticFixpoint();
7756         }
7757       }
7758     }
7759     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7760                                          : ChangeStatus::CHANGED;
7761   }
7762 
7763   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7764     auto AssumedBefore = getAssumed();
7765     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7766       Value *IncomingValue = PHI->getIncomingValue(u);
7767       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7768           *this, IRPosition::value(*IncomingValue));
7769       if (!PotentialValuesAA.isValidState())
7770         return indicatePessimisticFixpoint();
7771       if (PotentialValuesAA.undefIsContained())
7772         unionAssumedWithUndef();
7773       else
7774         unionAssumed(PotentialValuesAA.getAssumed());
7775     }
7776     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7777                                          : ChangeStatus::CHANGED;
7778   }
7779 
7780   /// See AbstractAttribute::updateImpl(...).
7781   ChangeStatus updateImpl(Attributor &A) override {
7782     Value &V = getAssociatedValue();
7783     Instruction *I = dyn_cast<Instruction>(&V);
7784 
7785     if (auto *ICI = dyn_cast<ICmpInst>(I))
7786       return updateWithICmpInst(A, ICI);
7787 
7788     if (auto *SI = dyn_cast<SelectInst>(I))
7789       return updateWithSelectInst(A, SI);
7790 
7791     if (auto *CI = dyn_cast<CastInst>(I))
7792       return updateWithCastInst(A, CI);
7793 
7794     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7795       return updateWithBinaryOperator(A, BinOp);
7796 
7797     if (auto *PHI = dyn_cast<PHINode>(I))
7798       return updateWithPHINode(A, PHI);
7799 
7800     return indicatePessimisticFixpoint();
7801   }
7802 
7803   /// See AbstractAttribute::trackStatistics()
7804   void trackStatistics() const override {
7805     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7806   }
7807 };
7808 
7809 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7810   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7811       : AAPotentialValuesImpl(IRP, A) {}
7812 
  /// See AbstractAttribute::updateImpl(...).
7814   ChangeStatus updateImpl(Attributor &A) override {
7815     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7816                      "not be called");
7817   }
7818 
7819   /// See AbstractAttribute::trackStatistics()
7820   void trackStatistics() const override {
7821     STATS_DECLTRACK_FN_ATTR(potential_values)
7822   }
7823 };
7824 
7825 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7826   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7827       : AAPotentialValuesFunction(IRP, A) {}
7828 
7829   /// See AbstractAttribute::trackStatistics()
7830   void trackStatistics() const override {
7831     STATS_DECLTRACK_CS_ATTR(potential_values)
7832   }
7833 };
7834 
7835 struct AAPotentialValuesCallSiteReturned
7836     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7837   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7838       : AACallSiteReturnedFromReturned<AAPotentialValues,
7839                                        AAPotentialValuesImpl>(IRP, A) {}
7840 
7841   /// See AbstractAttribute::trackStatistics()
7842   void trackStatistics() const override {
7843     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7844   }
7845 };
7846 
7847 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7848   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7849       : AAPotentialValuesFloating(IRP, A) {}
7850 
7851   /// See AbstractAttribute::initialize(..).
7852   void initialize(Attributor &A) override {
7853     Value &V = getAssociatedValue();
7854 
7855     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7856       unionAssumed(C->getValue());
7857       indicateOptimisticFixpoint();
7858       return;
7859     }
7860 
7861     if (isa<UndefValue>(&V)) {
7862       unionAssumedWithUndef();
7863       indicateOptimisticFixpoint();
7864       return;
7865     }
7866   }
7867 
7868   /// See AbstractAttribute::updateImpl(...).
7869   ChangeStatus updateImpl(Attributor &A) override {
7870     Value &V = getAssociatedValue();
7871     auto AssumedBefore = getAssumed();
7872     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V));
7873     const auto &S = AA.getAssumed();
7874     unionAssumed(S);
7875     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7876                                          : ChangeStatus::CHANGED;
7877   }
7878 
7879   /// See AbstractAttribute::trackStatistics()
7880   void trackStatistics() const override {
7881     STATS_DECLTRACK_CSARG_ATTR(potential_values)
7882   }
7883 };
7884 
7885 /// ------------------------ NoUndef Attribute ---------------------------------
7886 struct AANoUndefImpl : AANoUndef {
7887   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
7888 
7889   /// See AbstractAttribute::initialize(...).
7890   void initialize(Attributor &A) override {
7891     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
7892       indicateOptimisticFixpoint();
7893       return;
7894     }
7895     Value &V = getAssociatedValue();
7896     if (isa<UndefValue>(V))
7897       indicatePessimisticFixpoint();
7898     else if (isa<FreezeInst>(V))
7899       indicateOptimisticFixpoint();
7900     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
7901              isGuaranteedNotToBeUndefOrPoison(&V))
7902       indicateOptimisticFixpoint();
7903     else
7904       AANoUndef::initialize(A);
7905   }
7906 
7907   /// See followUsesInMBEC
7908   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
7909                        AANoUndef::StateType &State) {
7910     const Value *UseV = U->get();
7911     const DominatorTree *DT = nullptr;
7912     AssumptionCache *AC = nullptr;
7913     InformationCache &InfoCache = A.getInfoCache();
7914     if (Function *F = getAnchorScope()) {
7915       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
7916       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
7917     }
7918     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
7919     bool TrackUse = false;
7920     // Track use for instructions which must produce undef or poison bits when
7921     // at least one operand contains such bits.
7922     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
7923       TrackUse = true;
7924     return TrackUse;
7925   }
7926 
7927   /// See AbstractAttribute::getAsStr().
7928   const std::string getAsStr() const override {
7929     return getAssumed() ? "noundef" : "may-undef-or-poison";
7930   }
7931 
7932   ChangeStatus manifest(Attributor &A) override {
7933     // We don't manifest noundef attribute for dead positions because the
7934     // associated values with dead positions would be replaced with undef
7935     // values.
7936     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
7937       return ChangeStatus::UNCHANGED;
7938     // A position whose simplified value does not have any value is
7939     // considered to be dead. We don't manifest noundef in such positions for
    // the same reason as above.
7941     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
7942         *this, getIRPosition(), /* TrackDependence */ false);
7943     if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
7944       return ChangeStatus::UNCHANGED;
7945     return AANoUndef::manifest(A);
7946   }
7947 };
7948 
7949 struct AANoUndefFloating : public AANoUndefImpl {
7950   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
7951       : AANoUndefImpl(IRP, A) {}
7952 
7953   /// See AbstractAttribute::initialize(...).
7954   void initialize(Attributor &A) override {
7955     AANoUndefImpl::initialize(A);
7956     if (!getState().isAtFixpoint())
7957       if (Instruction *CtxI = getCtxI())
7958         followUsesInMBEC(*this, A, getState(), *CtxI);
7959   }
7960 
7961   /// See AbstractAttribute::updateImpl(...).
7962   ChangeStatus updateImpl(Attributor &A) override {
7963     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7964                             AANoUndef::StateType &T, bool Stripped) -> bool {
7965       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V));
7966       if (!Stripped && this == &AA) {
7967         T.indicatePessimisticFixpoint();
7968       } else {
7969         const AANoUndef::StateType &S =
7970             static_cast<const AANoUndef::StateType &>(AA.getState());
7971         T ^= S;
7972       }
7973       return T.isValidState();
7974     };
7975 
7976     StateType T;
7977     if (!genericValueTraversal<AANoUndef, StateType>(
7978             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
7979       return indicatePessimisticFixpoint();
7980 
7981     return clampStateAndIndicateChange(getState(), T);
7982   }
7983 
7984   /// See AbstractAttribute::trackStatistics()
7985   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
7986 };
7987 
7988 struct AANoUndefReturned final
7989     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
7990   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
7991       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
7992 
7993   /// See AbstractAttribute::trackStatistics()
7994   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
7995 };
7996 
7997 struct AANoUndefArgument final
7998     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
7999   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
8000       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
8001 
8002   /// See AbstractAttribute::trackStatistics()
8003   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
8004 };
8005 
8006 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
8007   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
8008       : AANoUndefFloating(IRP, A) {}
8009 
8010   /// See AbstractAttribute::trackStatistics()
8011   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
8012 };
8013 
8014 struct AANoUndefCallSiteReturned final
8015     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
8016   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
8017       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
8018 
8019   /// See AbstractAttribute::trackStatistics()
8020   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
8021 };
8022 } // namespace

// Definitions of the unique class IDs; the address of each ID is used to
// identify the corresponding abstract attribute kind.
const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }
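
// For illustration only: with the macros above, a use such as
//   CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// expands roughly to the following generator, which instantiates the
// suffixed implementation class matching the position kind (the
// llvm_unreachable cases for the other kinds are elided here):
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     // ... unreachable cases for non-function/call-site positions ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//       ++NumAAs;
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new (A.Allocator) AANoUnwindCallSite(IRP, A);
//       ++NumAAs;
//       break;
//     }
//     return *AA;
//   }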

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV