1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AssumeBundleQueries.h"
20 #include "llvm/Analysis/AssumptionCache.h"
21 #include "llvm/Analysis/CaptureTracking.h"
22 #include "llvm/Analysis/LazyValueInfo.h"
23 #include "llvm/Analysis/MemoryBuiltins.h"
24 #include "llvm/Analysis/ScalarEvolution.h"
25 #include "llvm/Analysis/TargetTransformInfo.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/IR/IRBuilder.h"
28 #include "llvm/IR/IntrinsicInst.h"
29 #include "llvm/IR/NoFolder.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 
34 #include <cassert>
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "attributor"
39 
40 static cl::opt<bool> ManifestInternal(
41     "attributor-manifest-internal", cl::Hidden,
42     cl::desc("Manifest Attributor internal string attributes."),
43     cl::init(false));
44 
45 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
46                                        cl::Hidden);
47 
48 template <>
49 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
50 
51 static cl::opt<unsigned, true> MaxPotentialValues(
52     "attributor-max-potential-values", cl::Hidden,
53     cl::desc("Maximum number of potential values to be "
54              "tracked for each position."),
55     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
56     cl::init(7));
57 
58 STATISTIC(NumAAs, "Number of abstract attributes created");
59 
60 // Some helper macros to deal with statistics tracking.
61 //
62 // Usage:
63 // For simple IR attribute tracking overload trackStatistics in the abstract
64 // attribute and choose the right STATS_DECLTRACK_********* macro,
65 // e.g.,:
66 //  void trackStatistics() const override {
67 //    STATS_DECLTRACK_ARG_ATTR(returned)
68 //  }
69 // If there is a single "increment" side one can use the macro
70 // STATS_DECLTRACK with a custom message. If there are multiple increment
71 // sides, STATS_DECL and STATS_TRACK can also be used separately.
72 //
73 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
74   ("Number of " #TYPE " marked '" #NAME "'")
75 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
76 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
77 #define STATS_DECL(NAME, TYPE, MSG)                                            \
78   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
79 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
80 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
81   {                                                                            \
82     STATS_DECL(NAME, TYPE, MSG)                                                \
83     STATS_TRACK(NAME, TYPE)                                                    \
84   }
85 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
86   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
87 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
88   STATS_DECLTRACK(NAME, CSArguments,                                           \
89                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
90 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
91   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
92 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
94 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
95   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
96                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
97 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, CSReturn,                                              \
99                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
100 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
101   STATS_DECLTRACK(NAME, Floating,                                              \
102                   ("Number of floating values known to be '" #NAME "'"))
103 
104 // Specialization of the operator<< for abstract attributes subclasses. This
105 // disambiguates situations where multiple operators are applicable.
106 namespace llvm {
107 #define PIPE_OPERATOR(CLASS)                                                   \
108   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
109     return OS << static_cast<const AbstractAttribute &>(AA);                   \
110   }
111 
112 PIPE_OPERATOR(AAIsDead)
113 PIPE_OPERATOR(AANoUnwind)
114 PIPE_OPERATOR(AANoSync)
115 PIPE_OPERATOR(AANoRecurse)
116 PIPE_OPERATOR(AAWillReturn)
117 PIPE_OPERATOR(AANoReturn)
118 PIPE_OPERATOR(AAReturnedValues)
119 PIPE_OPERATOR(AANonNull)
120 PIPE_OPERATOR(AANoAlias)
121 PIPE_OPERATOR(AADereferenceable)
122 PIPE_OPERATOR(AAAlign)
123 PIPE_OPERATOR(AANoCapture)
124 PIPE_OPERATOR(AAValueSimplify)
125 PIPE_OPERATOR(AANoFree)
126 PIPE_OPERATOR(AAHeapToStack)
127 PIPE_OPERATOR(AAReachability)
128 PIPE_OPERATOR(AAMemoryBehavior)
129 PIPE_OPERATOR(AAMemoryLocation)
130 PIPE_OPERATOR(AAValueConstantRange)
131 PIPE_OPERATOR(AAPrivatizablePtr)
132 PIPE_OPERATOR(AAUndefinedBehavior)
133 PIPE_OPERATOR(AAPotentialValues)
134 PIPE_OPERATOR(AANoUndef)
135 
136 #undef PIPE_OPERATOR
137 } // namespace llvm
138 
139 namespace {
140 
141 static Optional<ConstantInt *>
142 getAssumedConstantInt(Attributor &A, const Value &V,
143                       const AbstractAttribute &AA,
144                       bool &UsedAssumedInformation) {
145   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
146   if (C.hasValue())
147     return dyn_cast_or_null<ConstantInt>(C.getValue());
148   return llvm::None;
149 }
150 
151 /// Get pointer operand of memory accessing instruction. If \p I is
152 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
153 /// is set to false and the instruction is volatile, return nullptr.
154 static const Value *getPointerOperand(const Instruction *I,
155                                       bool AllowVolatile) {
156   if (auto *LI = dyn_cast<LoadInst>(I)) {
157     if (!AllowVolatile && LI->isVolatile())
158       return nullptr;
159     return LI->getPointerOperand();
160   }
161 
162   if (auto *SI = dyn_cast<StoreInst>(I)) {
163     if (!AllowVolatile && SI->isVolatile())
164       return nullptr;
165     return SI->getPointerOperand();
166   }
167 
168   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
169     if (!AllowVolatile && CXI->isVolatile())
170       return nullptr;
171     return CXI->getPointerOperand();
172   }
173 
174   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
175     if (!AllowVolatile && RMWI->isVolatile())
176       return nullptr;
177     return RMWI->getPointerOperand();
178   }
179 
180   return nullptr;
181 }
182 
183 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
184 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
185 /// getelement pointer instructions that traverse the natural type of \p Ptr if
186 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
187 /// through a cast to i8*.
188 ///
189 /// TODO: This could probably live somewhere more prominantly if it doesn't
190 ///       already exist.
191 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
192                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
193   assert(Offset >= 0 && "Negative offset not supported yet!");
194   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
195                     << "-bytes as " << *ResTy << "\n");
196 
197   // The initial type we are trying to traverse to get nice GEPs.
198   Type *Ty = Ptr->getType();
199 
200   SmallVector<Value *, 4> Indices;
201   std::string GEPName = Ptr->getName().str();
202   while (Offset) {
203     uint64_t Idx, Rem;
204 
205     if (auto *STy = dyn_cast<StructType>(Ty)) {
206       const StructLayout *SL = DL.getStructLayout(STy);
207       if (int64_t(SL->getSizeInBytes()) < Offset)
208         break;
209       Idx = SL->getElementContainingOffset(Offset);
210       assert(Idx < STy->getNumElements() && "Offset calculation error!");
211       Rem = Offset - SL->getElementOffset(Idx);
212       Ty = STy->getElementType(Idx);
213     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
214       Ty = PTy->getElementType();
215       if (!Ty->isSized())
216         break;
217       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
218       assert(ElementSize && "Expected type with size!");
219       Idx = Offset / ElementSize;
220       Rem = Offset % ElementSize;
221     } else {
222       // Non-aggregate type, we cast and make byte-wise progress now.
223       break;
224     }
225 
226     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
227                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
228 
229     GEPName += "." + std::to_string(Idx);
230     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
231     Offset = Rem;
232   }
233 
234   // Create a GEP if we collected indices above.
235   if (Indices.size())
236     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
237 
238   // If an offset is left we use byte-wise adjustment.
239   if (Offset) {
240     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
241     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
242                         GEPName + ".b" + Twine(Offset));
243   }
244 
245   // Ensure the result has the requested type.
246   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
247 
248   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
249   return Ptr;
250 }
251 
252 /// Recursively visit all values that might become \p IRP at some point. This
253 /// will be done by looking through cast instructions, selects, phis, and calls
254 /// with the "returned" attribute. Once we cannot look through the value any
255 /// further, the callback \p VisitValueCB is invoked and passed the current
256 /// value, the \p State, and a flag to indicate if we stripped anything.
257 /// Stripped means that we unpacked the value associated with \p IRP at least
258 /// once. Note that the value used for the callback may still be the value
259 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
260 /// we will never visit more values than specified by \p MaxValues.
261 template <typename AAType, typename StateTy>
262 static bool genericValueTraversal(
263     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
264     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
265         VisitValueCB,
266     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
267     function_ref<Value *(Value *)> StripCB = nullptr) {
268 
269   const AAIsDead *LivenessAA = nullptr;
270   if (IRP.getAnchorScope())
271     LivenessAA = &A.getAAFor<AAIsDead>(
272         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
273         /* TrackDependence */ false);
274   bool AnyDead = false;
275 
276   using Item = std::pair<Value *, const Instruction *>;
277   SmallSet<Item, 16> Visited;
278   SmallVector<Item, 16> Worklist;
279   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
280 
281   int Iteration = 0;
282   do {
283     Item I = Worklist.pop_back_val();
284     Value *V = I.first;
285     CtxI = I.second;
286     if (StripCB)
287       V = StripCB(V);
288 
289     // Check if we should process the current value. To prevent endless
290     // recursion keep a record of the values we followed!
291     if (!Visited.insert(I).second)
292       continue;
293 
294     // Make sure we limit the compile time for complex expressions.
295     if (Iteration++ >= MaxValues)
296       return false;
297 
298     // Explicitly look through calls with a "returned" attribute if we do
299     // not have a pointer as stripPointerCasts only works on them.
300     Value *NewV = nullptr;
301     if (V->getType()->isPointerTy()) {
302       NewV = V->stripPointerCasts();
303     } else {
304       auto *CB = dyn_cast<CallBase>(V);
305       if (CB && CB->getCalledFunction()) {
306         for (Argument &Arg : CB->getCalledFunction()->args())
307           if (Arg.hasReturnedAttr()) {
308             NewV = CB->getArgOperand(Arg.getArgNo());
309             break;
310           }
311       }
312     }
313     if (NewV && NewV != V) {
314       Worklist.push_back({NewV, CtxI});
315       continue;
316     }
317 
318     // Look through select instructions, visit both potential values.
319     if (auto *SI = dyn_cast<SelectInst>(V)) {
320       Worklist.push_back({SI->getTrueValue(), CtxI});
321       Worklist.push_back({SI->getFalseValue(), CtxI});
322       continue;
323     }
324 
325     // Look through phi nodes, visit all live operands.
326     if (auto *PHI = dyn_cast<PHINode>(V)) {
327       assert(LivenessAA &&
328              "Expected liveness in the presence of instructions!");
329       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
330         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
331         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
332                             LivenessAA,
333                             /* CheckBBLivenessOnly */ true)) {
334           AnyDead = true;
335           continue;
336         }
337         Worklist.push_back(
338             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
339       }
340       continue;
341     }
342 
343     if (UseValueSimplify && !isa<Constant>(V)) {
344       bool UsedAssumedInformation = false;
345       Optional<Constant *> C =
346           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
347       if (!C.hasValue())
348         continue;
349       if (Value *NewV = C.getValue()) {
350         Worklist.push_back({NewV, CtxI});
351         continue;
352       }
353     }
354 
355     // Once a leaf is reached we inform the user through the callback.
356     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
357       return false;
358   } while (!Worklist.empty());
359 
360   // If we actually used liveness information so we have to record a dependence.
361   if (AnyDead)
362     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
363 
364   // All values have been visited.
365   return true;
366 }
367 
368 const Value *stripAndAccumulateMinimalOffsets(
369     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
370     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
371     bool UseAssumed = false) {
372 
373   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
374     const IRPosition &Pos = IRPosition::value(V);
375     // Only track dependence if we are going to use the assumed info.
376     const AAValueConstantRange &ValueConstantRangeAA =
377         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
378                                          /* TrackDependence */ UseAssumed);
379     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
380                                      : ValueConstantRangeAA.getKnown();
381     // We can only use the lower part of the range because the upper part can
382     // be higher than what the value can really be.
383     ROffset = Range.getSignedMin();
384     return true;
385   };
386 
387   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
388                                                 AttributorAnalysis);
389 }
390 
391 static const Value *getMinimalBaseOfAccsesPointerOperand(
392     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
393     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
394   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
395   if (!Ptr)
396     return nullptr;
397   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
398   const Value *Base = stripAndAccumulateMinimalOffsets(
399       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
400 
401   BytesOffset = OffsetAPInt.getSExtValue();
402   return Base;
403 }
404 
405 static const Value *
406 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
407                                      const DataLayout &DL,
408                                      bool AllowNonInbounds = false) {
409   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
410   if (!Ptr)
411     return nullptr;
412 
413   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
414                                           AllowNonInbounds);
415 }
416 
417 /// Helper function to clamp a state \p S of type \p StateType with the
418 /// information in \p R and indicate/return if \p S did change (as-in update is
419 /// required to be run again).
420 template <typename StateType>
421 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
422   auto Assumed = S.getAssumed();
423   S ^= R;
424   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
425                                    : ChangeStatus::CHANGED;
426 }
427 
428 /// Clamp the information known for all returned values of a function
429 /// (identified by \p QueryingAA) into \p S.
430 template <typename AAType, typename StateType = typename AAType::StateType>
431 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
432                                      StateType &S) {
433   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
434                     << QueryingAA << " into " << S << "\n");
435 
436   assert((QueryingAA.getIRPosition().getPositionKind() ==
437               IRPosition::IRP_RETURNED ||
438           QueryingAA.getIRPosition().getPositionKind() ==
439               IRPosition::IRP_CALL_SITE_RETURNED) &&
440          "Can only clamp returned value states for a function returned or call "
441          "site returned position!");
442 
443   // Use an optional state as there might not be any return values and we want
444   // to join (IntegerState::operator&) the state of all there are.
445   Optional<StateType> T;
446 
447   // Callback for each possibly returned value.
448   auto CheckReturnValue = [&](Value &RV) -> bool {
449     const IRPosition &RVPos = IRPosition::value(RV);
450     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
451     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
452                       << " @ " << RVPos << "\n");
453     const StateType &AAS = AA.getState();
454     if (T.hasValue())
455       *T &= AAS;
456     else
457       T = AAS;
458     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
459                       << "\n");
460     return T->isValidState();
461   };
462 
463   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
464     S.indicatePessimisticFixpoint();
465   else if (T.hasValue())
466     S ^= *T;
467 }
468 
469 /// Helper class for generic deduction: return value -> returned position.
470 template <typename AAType, typename BaseType,
471           typename StateType = typename BaseType::StateType>
472 struct AAReturnedFromReturnedValues : public BaseType {
473   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
474       : BaseType(IRP, A) {}
475 
476   /// See AbstractAttribute::updateImpl(...).
477   ChangeStatus updateImpl(Attributor &A) override {
478     StateType S(StateType::getBestState(this->getState()));
479     clampReturnedValueStates<AAType, StateType>(A, *this, S);
480     // TODO: If we know we visited all returned values, thus no are assumed
481     // dead, we can take the known information from the state T.
482     return clampStateAndIndicateChange<StateType>(this->getState(), S);
483   }
484 };
485 
486 /// Clamp the information known at all call sites for a given argument
487 /// (identified by \p QueryingAA) into \p S.
488 template <typename AAType, typename StateType = typename AAType::StateType>
489 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
490                                         StateType &S) {
491   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
492                     << QueryingAA << " into " << S << "\n");
493 
494   assert(QueryingAA.getIRPosition().getPositionKind() ==
495              IRPosition::IRP_ARGUMENT &&
496          "Can only clamp call site argument states for an argument position!");
497 
498   // Use an optional state as there might not be any return values and we want
499   // to join (IntegerState::operator&) the state of all there are.
500   Optional<StateType> T;
501 
502   // The argument number which is also the call site argument number.
503   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
504 
505   auto CallSiteCheck = [&](AbstractCallSite ACS) {
506     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
507     // Check if a coresponding argument was found or if it is on not associated
508     // (which can happen for callback calls).
509     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
510       return false;
511 
512     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
513     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
514                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
515     const StateType &AAS = AA.getState();
516     if (T.hasValue())
517       *T &= AAS;
518     else
519       T = AAS;
520     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
521                       << "\n");
522     return T->isValidState();
523   };
524 
525   bool AllCallSitesKnown;
526   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
527                               AllCallSitesKnown))
528     S.indicatePessimisticFixpoint();
529   else if (T.hasValue())
530     S ^= *T;
531 }
532 
533 /// Helper class for generic deduction: call site argument -> argument position.
534 template <typename AAType, typename BaseType,
535           typename StateType = typename AAType::StateType>
536 struct AAArgumentFromCallSiteArguments : public BaseType {
537   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
538       : BaseType(IRP, A) {}
539 
540   /// See AbstractAttribute::updateImpl(...).
541   ChangeStatus updateImpl(Attributor &A) override {
542     StateType S(StateType::getBestState(this->getState()));
543     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
544     // TODO: If we know we visited all incoming values, thus no are assumed
545     // dead, we can take the known information from the state T.
546     return clampStateAndIndicateChange<StateType>(this->getState(), S);
547   }
548 };
549 
550 /// Helper class for generic replication: function returned -> cs returned.
551 template <typename AAType, typename BaseType,
552           typename StateType = typename BaseType::StateType>
553 struct AACallSiteReturnedFromReturned : public BaseType {
554   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
555       : BaseType(IRP, A) {}
556 
557   /// See AbstractAttribute::updateImpl(...).
558   ChangeStatus updateImpl(Attributor &A) override {
559     assert(this->getIRPosition().getPositionKind() ==
560                IRPosition::IRP_CALL_SITE_RETURNED &&
561            "Can only wrap function returned positions for call site returned "
562            "positions!");
563     auto &S = this->getState();
564 
565     const Function *AssociatedFunction =
566         this->getIRPosition().getAssociatedFunction();
567     if (!AssociatedFunction)
568       return S.indicatePessimisticFixpoint();
569 
570     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
571     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
572     return clampStateAndIndicateChange(S, AA.getState());
573   }
574 };
575 
576 /// Helper function to accumulate uses.
577 template <class AAType, typename StateType = typename AAType::StateType>
578 static void followUsesInContext(AAType &AA, Attributor &A,
579                                 MustBeExecutedContextExplorer &Explorer,
580                                 const Instruction *CtxI,
581                                 SetVector<const Use *> &Uses,
582                                 StateType &State) {
583   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
584   for (unsigned u = 0; u < Uses.size(); ++u) {
585     const Use *U = Uses[u];
586     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
587       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
588       if (Found && AA.followUseInMBEC(A, U, UserI, State))
589         for (const Use &Us : UserI->uses())
590           Uses.insert(&Us);
591     }
592   }
593 }
594 
595 /// Use the must-be-executed-context around \p I to add information into \p S.
596 /// The AAType class is required to have `followUseInMBEC` method with the
597 /// following signature and behaviour:
598 ///
599 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
600 /// U - Underlying use.
601 /// I - The user of the \p U.
602 /// Returns true if the value should be tracked transitively.
603 ///
604 template <class AAType, typename StateType = typename AAType::StateType>
605 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
606                              Instruction &CtxI) {
607 
608   // Container for (transitive) uses of the associated value.
609   SetVector<const Use *> Uses;
610   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
611     Uses.insert(&U);
612 
613   MustBeExecutedContextExplorer &Explorer =
614       A.getInfoCache().getMustBeExecutedContextExplorer();
615 
616   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
617 
618   if (S.isAtFixpoint())
619     return;
620 
621   SmallVector<const BranchInst *, 4> BrInsts;
622   auto Pred = [&](const Instruction *I) {
623     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
624       if (Br->isConditional())
625         BrInsts.push_back(Br);
626     return true;
627   };
628 
629   // Here, accumulate conditional branch instructions in the context. We
630   // explore the child paths and collect the known states. The disjunction of
631   // those states can be merged to its own state. Let ParentState_i be a state
632   // to indicate the known information for an i-th branch instruction in the
633   // context. ChildStates are created for its successors respectively.
634   //
635   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
636   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
637   //      ...
638   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
639   //
640   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
641   //
642   // FIXME: Currently, recursive branches are not handled. For example, we
643   // can't deduce that ptr must be dereferenced in below function.
644   //
645   // void f(int a, int c, int *ptr) {
646   //    if(a)
647   //      if (b) {
648   //        *ptr = 0;
649   //      } else {
650   //        *ptr = 1;
651   //      }
652   //    else {
653   //      if (b) {
654   //        *ptr = 0;
655   //      } else {
656   //        *ptr = 1;
657   //      }
658   //    }
659   // }
660 
661   Explorer.checkForAllContext(&CtxI, Pred);
662   for (const BranchInst *Br : BrInsts) {
663     StateType ParentState;
664 
665     // The known state of the parent state is a conjunction of children's
666     // known states so it is initialized with a best state.
667     ParentState.indicateOptimisticFixpoint();
668 
669     for (const BasicBlock *BB : Br->successors()) {
670       StateType ChildState;
671 
672       size_t BeforeSize = Uses.size();
673       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
674 
675       // Erase uses which only appear in the child.
676       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
677         It = Uses.erase(It);
678 
679       ParentState &= ChildState;
680     }
681 
682     // Use only known state.
683     S += ParentState;
684   }
685 }
686 
687 /// -----------------------NoUnwind Function Attribute--------------------------
688 
689 struct AANoUnwindImpl : AANoUnwind {
690   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
691 
692   const std::string getAsStr() const override {
693     return getAssumed() ? "nounwind" : "may-unwind";
694   }
695 
696   /// See AbstractAttribute::updateImpl(...).
697   ChangeStatus updateImpl(Attributor &A) override {
698     auto Opcodes = {
699         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
700         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
701         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
702 
703     auto CheckForNoUnwind = [&](Instruction &I) {
704       if (!I.mayThrow())
705         return true;
706 
707       if (const auto *CB = dyn_cast<CallBase>(&I)) {
708         const auto &NoUnwindAA =
709             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
710         return NoUnwindAA.isAssumedNoUnwind();
711       }
712       return false;
713     };
714 
715     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
716       return indicatePessimisticFixpoint();
717 
718     return ChangeStatus::UNCHANGED;
719   }
720 };
721 
722 struct AANoUnwindFunction final : public AANoUnwindImpl {
723   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
724       : AANoUnwindImpl(IRP, A) {}
725 
726   /// See AbstractAttribute::trackStatistics()
727   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
728 };
729 
730 /// NoUnwind attribute deduction for a call sites.
731 struct AANoUnwindCallSite final : AANoUnwindImpl {
732   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
733       : AANoUnwindImpl(IRP, A) {}
734 
735   /// See AbstractAttribute::initialize(...).
736   void initialize(Attributor &A) override {
737     AANoUnwindImpl::initialize(A);
738     Function *F = getAssociatedFunction();
739     if (!F || F->isDeclaration())
740       indicatePessimisticFixpoint();
741   }
742 
743   /// See AbstractAttribute::updateImpl(...).
744   ChangeStatus updateImpl(Attributor &A) override {
745     // TODO: Once we have call site specific value information we can provide
746     //       call site specific liveness information and then it makes
747     //       sense to specialize attributes for call sites arguments instead of
748     //       redirecting requests to the callee argument.
749     Function *F = getAssociatedFunction();
750     const IRPosition &FnPos = IRPosition::function(*F);
751     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
752     return clampStateAndIndicateChange(getState(), FnAA.getState());
753   }
754 
755   /// See AbstractAttribute::trackStatistics()
756   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
757 };
758 
759 /// --------------------- Function Return Values -------------------------------
760 
761 /// "Attribute" that collects all potential returned values and the return
762 /// instructions that they arise from.
763 ///
764 /// If there is a unique returned value R, the manifest method will:
765 ///   - mark R with the "returned" attribute, if R is an argument.
766 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
767 
768   /// Mapping of values potentially returned by the associated function to the
769   /// return instructions that might return them.
770   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
771 
772   /// Mapping to remember the number of returned values for a call site such
773   /// that we can avoid updates if nothing changed.
774   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
775 
776   /// Set of unresolved calls returned by the associated function.
777   SmallSetVector<CallBase *, 4> UnresolvedCalls;
778 
779   /// State flags
780   ///
781   ///{
782   bool IsFixed = false;
783   bool IsValidState = true;
784   ///}
785 
786 public:
787   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
788       : AAReturnedValues(IRP, A) {}
789 
790   /// See AbstractAttribute::initialize(...).
791   void initialize(Attributor &A) override {
792     // Reset the state.
793     IsFixed = false;
794     IsValidState = true;
795     ReturnedValues.clear();
796 
797     Function *F = getAssociatedFunction();
798     if (!F || F->isDeclaration()) {
799       indicatePessimisticFixpoint();
800       return;
801     }
802     assert(!F->getReturnType()->isVoidTy() &&
803            "Did not expect a void return type!");
804 
805     // The map from instruction opcodes to those instructions in the function.
806     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
807 
808     // Look through all arguments, if one is marked as returned we are done.
809     for (Argument &Arg : F->args()) {
810       if (Arg.hasReturnedAttr()) {
811         auto &ReturnInstSet = ReturnedValues[&Arg];
812         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
813           for (Instruction *RI : *Insts)
814             ReturnInstSet.insert(cast<ReturnInst>(RI));
815 
816         indicateOptimisticFixpoint();
817         return;
818       }
819     }
820 
821     if (!A.isFunctionIPOAmendable(*F))
822       indicatePessimisticFixpoint();
823   }
824 
825   /// See AbstractAttribute::manifest(...).
826   ChangeStatus manifest(Attributor &A) override;
827 
828   /// See AbstractAttribute::getState(...).
829   AbstractState &getState() override { return *this; }
830 
831   /// See AbstractAttribute::getState(...).
832   const AbstractState &getState() const override { return *this; }
833 
834   /// See AbstractAttribute::updateImpl(Attributor &A).
835   ChangeStatus updateImpl(Attributor &A) override;
836 
837   llvm::iterator_range<iterator> returned_values() override {
838     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
839   }
840 
841   llvm::iterator_range<const_iterator> returned_values() const override {
842     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
843   }
844 
845   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
846     return UnresolvedCalls;
847   }
848 
849   /// Return the number of potential return values, -1 if unknown.
850   size_t getNumReturnValues() const override {
851     return isValidState() ? ReturnedValues.size() : -1;
852   }
853 
854   /// Return an assumed unique return value if a single candidate is found. If
855   /// there cannot be one, return a nullptr. If it is not clear yet, return the
856   /// Optional::NoneType.
857   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
858 
859   /// See AbstractState::checkForAllReturnedValues(...).
860   bool checkForAllReturnedValuesAndReturnInsts(
861       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
862       const override;
863 
864   /// Pretty print the attribute similar to the IR representation.
865   const std::string getAsStr() const override;
866 
867   /// See AbstractState::isAtFixpoint().
868   bool isAtFixpoint() const override { return IsFixed; }
869 
870   /// See AbstractState::isValidState().
871   bool isValidState() const override { return IsValidState; }
872 
873   /// See AbstractState::indicateOptimisticFixpoint(...).
874   ChangeStatus indicateOptimisticFixpoint() override {
875     IsFixed = true;
876     return ChangeStatus::UNCHANGED;
877   }
878 
879   ChangeStatus indicatePessimisticFixpoint() override {
880     IsFixed = true;
881     IsValidState = false;
882     return ChangeStatus::CHANGED;
883   }
884 };
885 
886 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
887   ChangeStatus Changed = ChangeStatus::UNCHANGED;
888 
889   // Bookkeeping.
890   assert(isValidState());
891   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
892                   "Number of function with known return values");
893 
894   // Check if we have an assumed unique return value that we could manifest.
895   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
896 
897   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
898     return Changed;
899 
900   // Bookkeeping.
901   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
902                   "Number of function with unique return");
903 
904   // Callback to replace the uses of CB with the constant C.
905   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
906     if (CB.use_empty())
907       return ChangeStatus::UNCHANGED;
908     if (A.changeValueAfterManifest(CB, C))
909       return ChangeStatus::CHANGED;
910     return ChangeStatus::UNCHANGED;
911   };
912 
913   // If the assumed unique return value is an argument, annotate it.
914   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
915     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
916             getAssociatedFunction()->getReturnType())) {
917       getIRPosition() = IRPosition::argument(*UniqueRVArg);
918       Changed = IRAttribute::manifest(A);
919     }
920   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
921     // We can replace the returned value with the unique returned constant.
922     Value &AnchorValue = getAnchorValue();
923     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
924       for (const Use &U : F->uses())
925         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
926           if (CB->isCallee(&U)) {
927             Constant *RVCCast =
928                 CB->getType() == RVC->getType()
929                     ? RVC
930                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
931             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
932           }
933     } else {
934       assert(isa<CallBase>(AnchorValue) &&
935              "Expcected a function or call base anchor!");
936       Constant *RVCCast =
937           AnchorValue.getType() == RVC->getType()
938               ? RVC
939               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
940       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
941     }
942     if (Changed == ChangeStatus::CHANGED)
943       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
944                       "Number of function returns replaced by constant return");
945   }
946 
947   return Changed;
948 }
949 
950 const std::string AAReturnedValuesImpl::getAsStr() const {
951   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
952          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
953          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
954 }
955 
956 Optional<Value *>
957 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
958   // If checkForAllReturnedValues provides a unique value, ignoring potential
959   // undef values that can also be present, it is assumed to be the actual
960   // return value and forwarded to the caller of this method. If there are
961   // multiple, a nullptr is returned indicating there cannot be a unique
962   // returned value.
963   Optional<Value *> UniqueRV;
964 
965   auto Pred = [&](Value &RV) -> bool {
966     // If we found a second returned value and neither the current nor the saved
967     // one is an undef, there is no unique returned value. Undefs are special
968     // since we can pretend they have any value.
969     if (UniqueRV.hasValue() && UniqueRV != &RV &&
970         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
971       UniqueRV = nullptr;
972       return false;
973     }
974 
975     // Do not overwrite a value with an undef.
976     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
977       UniqueRV = &RV;
978 
979     return true;
980   };
981 
982   if (!A.checkForAllReturnedValues(Pred, *this))
983     UniqueRV = nullptr;
984 
985   return UniqueRV;
986 }
987 
988 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
989     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
990     const {
991   if (!isValidState())
992     return false;
993 
994   // Check all returned values but ignore call sites as long as we have not
995   // encountered an overdefined one during an update.
996   for (auto &It : ReturnedValues) {
997     Value *RV = It.first;
998 
999     CallBase *CB = dyn_cast<CallBase>(RV);
1000     if (CB && !UnresolvedCalls.count(CB))
1001       continue;
1002 
1003     if (!Pred(*RV, It.second))
1004       return false;
1005   }
1006 
1007   return true;
1008 }
1009 
1010 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1011   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1012   bool Changed = false;
1013 
1014   // State used in the value traversals starting in returned values.
1015   struct RVState {
1016     // The map in which we collect return values -> return instrs.
1017     decltype(ReturnedValues) &RetValsMap;
1018     // The flag to indicate a change.
1019     bool &Changed;
1020     // The return instrs we come from.
1021     SmallSetVector<ReturnInst *, 4> RetInsts;
1022   };
1023 
1024   // Callback for a leaf value returned by the associated function.
1025   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1026                          bool) -> bool {
1027     auto Size = RVS.RetValsMap[&Val].size();
1028     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1029     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1030     RVS.Changed |= Inserted;
1031     LLVM_DEBUG({
1032       if (Inserted)
1033         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1034                << " => " << RVS.RetInsts.size() << "\n";
1035     });
1036     return true;
1037   };
1038 
1039   // Helper method to invoke the generic value traversal.
1040   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1041                                 const Instruction *CtxI) {
1042     IRPosition RetValPos = IRPosition::value(RV);
1043     return genericValueTraversal<AAReturnedValues, RVState>(
1044         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1045         /* UseValueSimplify */ false);
1046   };
1047 
1048   // Callback for all "return intructions" live in the associated function.
1049   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1050     ReturnInst &Ret = cast<ReturnInst>(I);
1051     RVState RVS({ReturnedValues, Changed, {}});
1052     RVS.RetInsts.insert(&Ret);
1053     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1054   };
1055 
1056   // Start by discovering returned values from all live returned instructions in
1057   // the associated function.
1058   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1059     return indicatePessimisticFixpoint();
1060 
1061   // Once returned values "directly" present in the code are handled we try to
1062   // resolve returned calls. To avoid modifications to the ReturnedValues map
1063   // while we iterate over it we kept record of potential new entries in a copy
1064   // map, NewRVsMap.
1065   decltype(ReturnedValues) NewRVsMap;
1066 
1067   auto HandleReturnValue = [&](Value *RV,
1068                                SmallSetVector<ReturnInst *, 4> &RIs) {
1069     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1070                       << RIs.size() << " RIs\n");
1071     CallBase *CB = dyn_cast<CallBase>(RV);
1072     if (!CB || UnresolvedCalls.count(CB))
1073       return;
1074 
1075     if (!CB->getCalledFunction()) {
1076       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1077                         << "\n");
1078       UnresolvedCalls.insert(CB);
1079       return;
1080     }
1081 
1082     // TODO: use the function scope once we have call site AAReturnedValues.
1083     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1084         *this, IRPosition::function(*CB->getCalledFunction()));
1085     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1086                       << RetValAA << "\n");
1087 
1088     // Skip dead ends, thus if we do not know anything about the returned
1089     // call we mark it as unresolved and it will stay that way.
1090     if (!RetValAA.getState().isValidState()) {
1091       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1092                         << "\n");
1093       UnresolvedCalls.insert(CB);
1094       return;
1095     }
1096 
1097     // Do not try to learn partial information. If the callee has unresolved
1098     // return values we will treat the call as unresolved/opaque.
1099     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1100     if (!RetValAAUnresolvedCalls.empty()) {
1101       UnresolvedCalls.insert(CB);
1102       return;
1103     }
1104 
1105     // Now check if we can track transitively returned values. If possible, thus
1106     // if all return value can be represented in the current scope, do so.
1107     bool Unresolved = false;
1108     for (auto &RetValAAIt : RetValAA.returned_values()) {
1109       Value *RetVal = RetValAAIt.first;
1110       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1111           isa<Constant>(RetVal))
1112         continue;
1113       // Anything that did not fit in the above categories cannot be resolved,
1114       // mark the call as unresolved.
1115       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1116                            "cannot be translated: "
1117                         << *RetVal << "\n");
1118       UnresolvedCalls.insert(CB);
1119       Unresolved = true;
1120       break;
1121     }
1122 
1123     if (Unresolved)
1124       return;
1125 
1126     // Now track transitively returned values.
1127     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1128     if (NumRetAA == RetValAA.getNumReturnValues()) {
1129       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1130                            "changed since it was seen last\n");
1131       return;
1132     }
1133     NumRetAA = RetValAA.getNumReturnValues();
1134 
1135     for (auto &RetValAAIt : RetValAA.returned_values()) {
1136       Value *RetVal = RetValAAIt.first;
1137       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1138         // Arguments are mapped to call site operands and we begin the traversal
1139         // again.
1140         bool Unused = false;
1141         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1142         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1143         continue;
1144       }
1145       if (isa<CallBase>(RetVal)) {
1146         // Call sites are resolved by the callee attribute over time, no need to
1147         // do anything for us.
1148         continue;
1149       }
1150       if (isa<Constant>(RetVal)) {
1151         // Constants are valid everywhere, we can simply take them.
1152         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1153         continue;
1154       }
1155     }
1156   };
1157 
1158   for (auto &It : ReturnedValues)
1159     HandleReturnValue(It.first, It.second);
1160 
1161   // Because processing the new information can again lead to new return values
1162   // we have to be careful and iterate until this iteration is complete. The
1163   // idea is that we are in a stable state at the end of an update. All return
1164   // values have been handled and properly categorized. We might not update
1165   // again if we have not requested a non-fix attribute so we cannot "wait" for
1166   // the next update to analyze a new return value.
1167   while (!NewRVsMap.empty()) {
1168     auto It = std::move(NewRVsMap.back());
1169     NewRVsMap.pop_back();
1170 
1171     assert(!It.second.empty() && "Entry does not add anything.");
1172     auto &ReturnInsts = ReturnedValues[It.first];
1173     for (ReturnInst *RI : It.second)
1174       if (ReturnInsts.insert(RI)) {
1175         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1176                           << *It.first << " => " << *RI << "\n");
1177         HandleReturnValue(It.first, ReturnInsts);
1178         Changed = true;
1179       }
1180   }
1181 
1182   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1183   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1184 }
1185 
1186 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1187   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1188       : AAReturnedValuesImpl(IRP, A) {}
1189 
1190   /// See AbstractAttribute::trackStatistics()
1191   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1192 };
1193 
1194 /// Returned values information for a call sites.
1195 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1196   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1197       : AAReturnedValuesImpl(IRP, A) {}
1198 
1199   /// See AbstractAttribute::initialize(...).
1200   void initialize(Attributor &A) override {
1201     // TODO: Once we have call site specific value information we can provide
1202     //       call site specific liveness information and then it makes
1203     //       sense to specialize attributes for call sites instead of
1204     //       redirecting requests to the callee.
1205     llvm_unreachable("Abstract attributes for returned values are not "
1206                      "supported for call sites yet!");
1207   }
1208 
1209   /// See AbstractAttribute::updateImpl(...).
1210   ChangeStatus updateImpl(Attributor &A) override {
1211     return indicatePessimisticFixpoint();
1212   }
1213 
1214   /// See AbstractAttribute::trackStatistics()
1215   void trackStatistics() const override {}
1216 };
1217 
1218 /// ------------------------ NoSync Function Attribute -------------------------
1219 
1220 struct AANoSyncImpl : AANoSync {
1221   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1222 
1223   const std::string getAsStr() const override {
1224     return getAssumed() ? "nosync" : "may-sync";
1225   }
1226 
1227   /// See AbstractAttribute::updateImpl(...).
1228   ChangeStatus updateImpl(Attributor &A) override;
1229 
1230   /// Helper function used to determine whether an instruction is non-relaxed
1231   /// atomic. In other words, if an atomic instruction does not have unordered
1232   /// or monotonic ordering
1233   static bool isNonRelaxedAtomic(Instruction *I);
1234 
1235   /// Helper function used to determine whether an instruction is volatile.
1236   static bool isVolatile(Instruction *I);
1237 
1238   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1239   /// memset).
1240   static bool isNoSyncIntrinsic(Instruction *I);
1241 };
1242 
1243 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1244   if (!I->isAtomic())
1245     return false;
1246 
1247   AtomicOrdering Ordering;
1248   switch (I->getOpcode()) {
1249   case Instruction::AtomicRMW:
1250     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1251     break;
1252   case Instruction::Store:
1253     Ordering = cast<StoreInst>(I)->getOrdering();
1254     break;
1255   case Instruction::Load:
1256     Ordering = cast<LoadInst>(I)->getOrdering();
1257     break;
1258   case Instruction::Fence: {
1259     auto *FI = cast<FenceInst>(I);
1260     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1261       return false;
1262     Ordering = FI->getOrdering();
1263     break;
1264   }
1265   case Instruction::AtomicCmpXchg: {
1266     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1267     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1268     // Only if both are relaxed, than it can be treated as relaxed.
1269     // Otherwise it is non-relaxed.
1270     if (Success != AtomicOrdering::Unordered &&
1271         Success != AtomicOrdering::Monotonic)
1272       return true;
1273     if (Failure != AtomicOrdering::Unordered &&
1274         Failure != AtomicOrdering::Monotonic)
1275       return true;
1276     return false;
1277   }
1278   default:
1279     llvm_unreachable(
1280         "New atomic operations need to be known in the attributor.");
1281   }
1282 
1283   // Relaxed.
1284   if (Ordering == AtomicOrdering::Unordered ||
1285       Ordering == AtomicOrdering::Monotonic)
1286     return false;
1287   return true;
1288 }
1289 
1290 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1291 /// FIXME: We should ipmrove the handling of intrinsics.
1292 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1293   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1294     switch (II->getIntrinsicID()) {
1295     /// Element wise atomic memory intrinsics are can only be unordered,
1296     /// therefore nosync.
1297     case Intrinsic::memset_element_unordered_atomic:
1298     case Intrinsic::memmove_element_unordered_atomic:
1299     case Intrinsic::memcpy_element_unordered_atomic:
1300       return true;
1301     case Intrinsic::memset:
1302     case Intrinsic::memmove:
1303     case Intrinsic::memcpy:
1304       if (!cast<MemIntrinsic>(II)->isVolatile())
1305         return true;
1306       return false;
1307     default:
1308       return false;
1309     }
1310   }
1311   return false;
1312 }
1313 
1314 bool AANoSyncImpl::isVolatile(Instruction *I) {
1315   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1316 
1317   switch (I->getOpcode()) {
1318   case Instruction::AtomicRMW:
1319     return cast<AtomicRMWInst>(I)->isVolatile();
1320   case Instruction::Store:
1321     return cast<StoreInst>(I)->isVolatile();
1322   case Instruction::Load:
1323     return cast<LoadInst>(I)->isVolatile();
1324   case Instruction::AtomicCmpXchg:
1325     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1326   default:
1327     return false;
1328   }
1329 }
1330 
1331 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1332 
1333   auto CheckRWInstForNoSync = [&](Instruction &I) {
1334     /// We are looking for volatile instructions or Non-Relaxed atomics.
1335     /// FIXME: We should improve the handling of intrinsics.
1336 
1337     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1338       return true;
1339 
1340     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1341       if (CB->hasFnAttr(Attribute::NoSync))
1342         return true;
1343 
1344       const auto &NoSyncAA =
1345           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
1346       if (NoSyncAA.isAssumedNoSync())
1347         return true;
1348       return false;
1349     }
1350 
1351     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1352       return true;
1353 
1354     return false;
1355   };
1356 
1357   auto CheckForNoSync = [&](Instruction &I) {
1358     // At this point we handled all read/write effects and they are all
1359     // nosync, so they can be skipped.
1360     if (I.mayReadOrWriteMemory())
1361       return true;
1362 
1363     // non-convergent and readnone imply nosync.
1364     return !cast<CallBase>(I).isConvergent();
1365   };
1366 
1367   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1368       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1369     return indicatePessimisticFixpoint();
1370 
1371   return ChangeStatus::UNCHANGED;
1372 }
1373 
1374 struct AANoSyncFunction final : public AANoSyncImpl {
1375   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1376       : AANoSyncImpl(IRP, A) {}
1377 
1378   /// See AbstractAttribute::trackStatistics()
1379   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1380 };
1381 
1382 /// NoSync attribute deduction for a call sites.
1383 struct AANoSyncCallSite final : AANoSyncImpl {
1384   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1385       : AANoSyncImpl(IRP, A) {}
1386 
1387   /// See AbstractAttribute::initialize(...).
1388   void initialize(Attributor &A) override {
1389     AANoSyncImpl::initialize(A);
1390     Function *F = getAssociatedFunction();
1391     if (!F || F->isDeclaration())
1392       indicatePessimisticFixpoint();
1393   }
1394 
1395   /// See AbstractAttribute::updateImpl(...).
1396   ChangeStatus updateImpl(Attributor &A) override {
1397     // TODO: Once we have call site specific value information we can provide
1398     //       call site specific liveness information and then it makes
1399     //       sense to specialize attributes for call sites arguments instead of
1400     //       redirecting requests to the callee argument.
1401     Function *F = getAssociatedFunction();
1402     const IRPosition &FnPos = IRPosition::function(*F);
1403     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1404     return clampStateAndIndicateChange(getState(), FnAA.getState());
1405   }
1406 
1407   /// See AbstractAttribute::trackStatistics()
1408   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1409 };
1410 
1411 /// ------------------------ No-Free Attributes ----------------------------
1412 
1413 struct AANoFreeImpl : public AANoFree {
1414   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1415 
1416   /// See AbstractAttribute::updateImpl(...).
1417   ChangeStatus updateImpl(Attributor &A) override {
1418     auto CheckForNoFree = [&](Instruction &I) {
1419       const auto &CB = cast<CallBase>(I);
1420       if (CB.hasFnAttr(Attribute::NoFree))
1421         return true;
1422 
1423       const auto &NoFreeAA =
1424           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1425       return NoFreeAA.isAssumedNoFree();
1426     };
1427 
1428     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1429       return indicatePessimisticFixpoint();
1430     return ChangeStatus::UNCHANGED;
1431   }
1432 
1433   /// See AbstractAttribute::getAsStr().
1434   const std::string getAsStr() const override {
1435     return getAssumed() ? "nofree" : "may-free";
1436   }
1437 };
1438 
1439 struct AANoFreeFunction final : public AANoFreeImpl {
1440   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1441       : AANoFreeImpl(IRP, A) {}
1442 
1443   /// See AbstractAttribute::trackStatistics()
1444   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1445 };
1446 
1447 /// NoFree attribute deduction for a call sites.
1448 struct AANoFreeCallSite final : AANoFreeImpl {
1449   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1450       : AANoFreeImpl(IRP, A) {}
1451 
1452   /// See AbstractAttribute::initialize(...).
1453   void initialize(Attributor &A) override {
1454     AANoFreeImpl::initialize(A);
1455     Function *F = getAssociatedFunction();
1456     if (!F || F->isDeclaration())
1457       indicatePessimisticFixpoint();
1458   }
1459 
1460   /// See AbstractAttribute::updateImpl(...).
1461   ChangeStatus updateImpl(Attributor &A) override {
1462     // TODO: Once we have call site specific value information we can provide
1463     //       call site specific liveness information and then it makes
1464     //       sense to specialize attributes for call sites arguments instead of
1465     //       redirecting requests to the callee argument.
1466     Function *F = getAssociatedFunction();
1467     const IRPosition &FnPos = IRPosition::function(*F);
1468     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1469     return clampStateAndIndicateChange(getState(), FnAA.getState());
1470   }
1471 
1472   /// See AbstractAttribute::trackStatistics()
1473   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1474 };
1475 
1476 /// NoFree attribute for floating values.
1477 struct AANoFreeFloating : AANoFreeImpl {
1478   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1479       : AANoFreeImpl(IRP, A) {}
1480 
1481   /// See AbstractAttribute::trackStatistics()
1482   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1483 
1484   /// See Abstract Attribute::updateImpl(...).
1485   ChangeStatus updateImpl(Attributor &A) override {
1486     const IRPosition &IRP = getIRPosition();
1487 
1488     const auto &NoFreeAA =
1489         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1490     if (NoFreeAA.isAssumedNoFree())
1491       return ChangeStatus::UNCHANGED;
1492 
1493     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1494     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1495       Instruction *UserI = cast<Instruction>(U.getUser());
1496       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1497         if (CB->isBundleOperand(&U))
1498           return false;
1499         if (!CB->isArgOperand(&U))
1500           return true;
1501         unsigned ArgNo = CB->getArgOperandNo(&U);
1502 
1503         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1504             *this, IRPosition::callsite_argument(*CB, ArgNo));
1505         return NoFreeArg.isAssumedNoFree();
1506       }
1507 
1508       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1509           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1510         Follow = true;
1511         return true;
1512       }
1513       if (isa<ReturnInst>(UserI))
1514         return true;
1515 
1516       // Unknown user.
1517       return false;
1518     };
1519     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1520       return indicatePessimisticFixpoint();
1521 
1522     return ChangeStatus::UNCHANGED;
1523   }
1524 };
1525 
1526 /// NoFree attribute for a call site argument.
1527 struct AANoFreeArgument final : AANoFreeFloating {
1528   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1529       : AANoFreeFloating(IRP, A) {}
1530 
1531   /// See AbstractAttribute::trackStatistics()
1532   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1533 };
1534 
1535 /// NoFree attribute for call site arguments.
1536 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1537   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1538       : AANoFreeFloating(IRP, A) {}
1539 
1540   /// See AbstractAttribute::updateImpl(...).
1541   ChangeStatus updateImpl(Attributor &A) override {
1542     // TODO: Once we have call site specific value information we can provide
1543     //       call site specific liveness information and then it makes
1544     //       sense to specialize attributes for call sites arguments instead of
1545     //       redirecting requests to the callee argument.
1546     Argument *Arg = getAssociatedArgument();
1547     if (!Arg)
1548       return indicatePessimisticFixpoint();
1549     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1550     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1551     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1552   }
1553 
1554   /// See AbstractAttribute::trackStatistics()
1555   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1556 };
1557 
1558 /// NoFree attribute for function return value.
1559 struct AANoFreeReturned final : AANoFreeFloating {
1560   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1561       : AANoFreeFloating(IRP, A) {
1562     llvm_unreachable("NoFree is not applicable to function returns!");
1563   }
1564 
1565   /// See AbstractAttribute::initialize(...).
1566   void initialize(Attributor &A) override {
1567     llvm_unreachable("NoFree is not applicable to function returns!");
1568   }
1569 
1570   /// See AbstractAttribute::updateImpl(...).
1571   ChangeStatus updateImpl(Attributor &A) override {
1572     llvm_unreachable("NoFree is not applicable to function returns!");
1573   }
1574 
1575   /// See AbstractAttribute::trackStatistics()
1576   void trackStatistics() const override {}
1577 };
1578 
1579 /// NoFree attribute deduction for a call site return value.
1580 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1581   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1582       : AANoFreeFloating(IRP, A) {}
1583 
1584   ChangeStatus manifest(Attributor &A) override {
1585     return ChangeStatus::UNCHANGED;
1586   }
1587   /// See AbstractAttribute::trackStatistics()
1588   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1589 };
1590 
1591 /// ------------------------ NonNull Argument Attribute ------------------------
1592 static int64_t getKnownNonNullAndDerefBytesForUse(
1593     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1594     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1595   TrackUse = false;
1596 
1597   const Value *UseV = U->get();
1598   if (!UseV->getType()->isPointerTy())
1599     return 0;
1600 
1601   Type *PtrTy = UseV->getType();
1602   const Function *F = I->getFunction();
1603   bool NullPointerIsDefined =
1604       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1605   const DataLayout &DL = A.getInfoCache().getDL();
1606   if (const auto *CB = dyn_cast<CallBase>(I)) {
1607     if (CB->isBundleOperand(U)) {
1608       if (RetainedKnowledge RK = getKnowledgeFromUse(
1609               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1610         IsNonNull |=
1611             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1612         return RK.ArgValue;
1613       }
1614       return 0;
1615     }
1616 
1617     if (CB->isCallee(U)) {
1618       IsNonNull |= !NullPointerIsDefined;
1619       return 0;
1620     }
1621 
1622     unsigned ArgNo = CB->getArgOperandNo(U);
1623     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1624     // As long as we only use known information there is no need to track
1625     // dependences here.
1626     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1627                                                   /* TrackDependence */ false);
1628     IsNonNull |= DerefAA.isKnownNonNull();
1629     return DerefAA.getKnownDereferenceableBytes();
1630   }
1631 
1632   // We need to follow common pointer manipulation uses to the accesses they
1633   // feed into. We can try to be smart to avoid looking through things we do not
1634   // like for now, e.g., non-inbounds GEPs.
1635   if (isa<CastInst>(I)) {
1636     TrackUse = true;
1637     return 0;
1638   }
1639 
1640   if (isa<GetElementPtrInst>(I)) {
1641     TrackUse = true;
1642     return 0;
1643   }
1644 
1645   int64_t Offset;
1646   const Value *Base =
1647       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1648   if (Base) {
1649     if (Base == &AssociatedValue &&
1650         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1651       int64_t DerefBytes =
1652           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1653 
1654       IsNonNull |= !NullPointerIsDefined;
1655       return std::max(int64_t(0), DerefBytes);
1656     }
1657   }
1658 
1659   /// Corner case when an offset is 0.
1660   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1661                                               /*AllowNonInbounds*/ true);
1662   if (Base) {
1663     if (Offset == 0 && Base == &AssociatedValue &&
1664         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1665       int64_t DerefBytes =
1666           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1667       IsNonNull |= !NullPointerIsDefined;
1668       return std::max(int64_t(0), DerefBytes);
1669     }
1670   }
1671 
1672   return 0;
1673 }
1674 
1675 struct AANonNullImpl : AANonNull {
1676   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1677       : AANonNull(IRP, A),
1678         NullIsDefined(NullPointerIsDefined(
1679             getAnchorScope(),
1680             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1681 
1682   /// See AbstractAttribute::initialize(...).
1683   void initialize(Attributor &A) override {
1684     Value &V = getAssociatedValue();
1685     if (!NullIsDefined &&
1686         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1687                 /* IgnoreSubsumingPositions */ false, &A)) {
1688       indicateOptimisticFixpoint();
1689       return;
1690     }
1691 
1692     if (isa<ConstantPointerNull>(V)) {
1693       indicatePessimisticFixpoint();
1694       return;
1695     }
1696 
1697     AANonNull::initialize(A);
1698 
1699     bool CanBeNull = true;
1700     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) {
1701       if (!CanBeNull) {
1702         indicateOptimisticFixpoint();
1703         return;
1704       }
1705     }
1706 
1707     if (isa<GlobalValue>(&getAssociatedValue())) {
1708       indicatePessimisticFixpoint();
1709       return;
1710     }
1711 
1712     if (Instruction *CtxI = getCtxI())
1713       followUsesInMBEC(*this, A, getState(), *CtxI);
1714   }
1715 
1716   /// See followUsesInMBEC
1717   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1718                        AANonNull::StateType &State) {
1719     bool IsNonNull = false;
1720     bool TrackUse = false;
1721     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1722                                        IsNonNull, TrackUse);
1723     State.setKnown(IsNonNull);
1724     return TrackUse;
1725   }
1726 
1727   /// See AbstractAttribute::getAsStr().
1728   const std::string getAsStr() const override {
1729     return getAssumed() ? "nonnull" : "may-null";
1730   }
1731 
1732   /// Flag to determine if the underlying value can be null and still allow
1733   /// valid accesses.
1734   const bool NullIsDefined;
1735 };
1736 
1737 /// NonNull attribute for a floating value.
1738 struct AANonNullFloating : public AANonNullImpl {
1739   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1740       : AANonNullImpl(IRP, A) {}
1741 
1742   /// See AbstractAttribute::updateImpl(...).
1743   ChangeStatus updateImpl(Attributor &A) override {
1744     const DataLayout &DL = A.getDataLayout();
1745 
1746     DominatorTree *DT = nullptr;
1747     AssumptionCache *AC = nullptr;
1748     InformationCache &InfoCache = A.getInfoCache();
1749     if (const Function *Fn = getAnchorScope()) {
1750       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1751       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1752     }
1753 
1754     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1755                             AANonNull::StateType &T, bool Stripped) -> bool {
1756       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1757       if (!Stripped && this == &AA) {
1758         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1759           T.indicatePessimisticFixpoint();
1760       } else {
1761         // Use abstract attribute information.
1762         const AANonNull::StateType &NS = AA.getState();
1763         T ^= NS;
1764       }
1765       return T.isValidState();
1766     };
1767 
1768     StateType T;
1769     if (!genericValueTraversal<AANonNull, StateType>(
1770             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1771       return indicatePessimisticFixpoint();
1772 
1773     return clampStateAndIndicateChange(getState(), T);
1774   }
1775 
1776   /// See AbstractAttribute::trackStatistics()
1777   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1778 };
1779 
1780 /// NonNull attribute for function return value.
1781 struct AANonNullReturned final
1782     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1783   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1784       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1785 
1786   /// See AbstractAttribute::getAsStr().
1787   const std::string getAsStr() const override {
1788     return getAssumed() ? "nonnull" : "may-null";
1789   }
1790 
1791   /// See AbstractAttribute::trackStatistics()
1792   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1793 };
1794 
1795 /// NonNull attribute for function argument.
1796 struct AANonNullArgument final
1797     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1798   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1799       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1800 
1801   /// See AbstractAttribute::trackStatistics()
1802   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1803 };
1804 
1805 struct AANonNullCallSiteArgument final : AANonNullFloating {
1806   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1807       : AANonNullFloating(IRP, A) {}
1808 
1809   /// See AbstractAttribute::trackStatistics()
1810   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1811 };
1812 
1813 /// NonNull attribute for a call site return position.
1814 struct AANonNullCallSiteReturned final
1815     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1816   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1817       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1818 
1819   /// See AbstractAttribute::trackStatistics()
1820   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1821 };
1822 
1823 /// ------------------------ No-Recurse Attributes ----------------------------
1824 
1825 struct AANoRecurseImpl : public AANoRecurse {
1826   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1827 
1828   /// See AbstractAttribute::getAsStr()
1829   const std::string getAsStr() const override {
1830     return getAssumed() ? "norecurse" : "may-recurse";
1831   }
1832 };
1833 
1834 struct AANoRecurseFunction final : AANoRecurseImpl {
1835   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1836       : AANoRecurseImpl(IRP, A) {}
1837 
1838   /// See AbstractAttribute::initialize(...).
1839   void initialize(Attributor &A) override {
1840     AANoRecurseImpl::initialize(A);
1841     if (const Function *F = getAnchorScope())
1842       if (A.getInfoCache().getSccSize(*F) != 1)
1843         indicatePessimisticFixpoint();
1844   }
1845 
1846   /// See AbstractAttribute::updateImpl(...).
1847   ChangeStatus updateImpl(Attributor &A) override {
1848 
1849     // If all live call sites are known to be no-recurse, we are as well.
1850     auto CallSitePred = [&](AbstractCallSite ACS) {
1851       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1852           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1853           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1854       return NoRecurseAA.isKnownNoRecurse();
1855     };
1856     bool AllCallSitesKnown;
1857     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1858       // If we know all call sites and all are known no-recurse, we are done.
1859       // If all known call sites, which might not be all that exist, are known
1860       // to be no-recurse, we are not done but we can continue to assume
1861       // no-recurse. If one of the call sites we have not visited will become
1862       // live, another update is triggered.
1863       if (AllCallSitesKnown)
1864         indicateOptimisticFixpoint();
1865       return ChangeStatus::UNCHANGED;
1866     }
1867 
1868     // If the above check does not hold anymore we look at the calls.
1869     auto CheckForNoRecurse = [&](Instruction &I) {
1870       const auto &CB = cast<CallBase>(I);
1871       if (CB.hasFnAttr(Attribute::NoRecurse))
1872         return true;
1873 
1874       const auto &NoRecurseAA =
1875           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1876       if (!NoRecurseAA.isAssumedNoRecurse())
1877         return false;
1878 
1879       // Recursion to the same function
1880       if (CB.getCalledFunction() == getAnchorScope())
1881         return false;
1882 
1883       return true;
1884     };
1885 
1886     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1887       return indicatePessimisticFixpoint();
1888     return ChangeStatus::UNCHANGED;
1889   }
1890 
1891   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1892 };
1893 
1894 /// NoRecurse attribute deduction for a call sites.
1895 struct AANoRecurseCallSite final : AANoRecurseImpl {
1896   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1897       : AANoRecurseImpl(IRP, A) {}
1898 
1899   /// See AbstractAttribute::initialize(...).
1900   void initialize(Attributor &A) override {
1901     AANoRecurseImpl::initialize(A);
1902     Function *F = getAssociatedFunction();
1903     if (!F || F->isDeclaration())
1904       indicatePessimisticFixpoint();
1905   }
1906 
1907   /// See AbstractAttribute::updateImpl(...).
1908   ChangeStatus updateImpl(Attributor &A) override {
1909     // TODO: Once we have call site specific value information we can provide
1910     //       call site specific liveness information and then it makes
1911     //       sense to specialize attributes for call sites arguments instead of
1912     //       redirecting requests to the callee argument.
1913     Function *F = getAssociatedFunction();
1914     const IRPosition &FnPos = IRPosition::function(*F);
1915     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1916     return clampStateAndIndicateChange(getState(), FnAA.getState());
1917   }
1918 
1919   /// See AbstractAttribute::trackStatistics()
1920   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1921 };
1922 
1923 /// -------------------- Undefined-Behavior Attributes ------------------------
1924 
1925 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1926   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1927       : AAUndefinedBehavior(IRP, A) {}
1928 
1929   /// See AbstractAttribute::updateImpl(...).
1930   // through a pointer (i.e. also branches etc.)
1931   ChangeStatus updateImpl(Attributor &A) override {
1932     const size_t UBPrevSize = KnownUBInsts.size();
1933     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1934 
1935     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1936       // Skip instructions that are already saved.
1937       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1938         return true;
1939 
1940       // If we reach here, we know we have an instruction
1941       // that accesses memory through a pointer operand,
1942       // for which getPointerOperand() should give it to us.
1943       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1944       assert(PtrOp &&
1945              "Expected pointer operand of memory accessing instruction");
1946 
1947       // Either we stopped and the appropriate action was taken,
1948       // or we got back a simplified value to continue.
1949       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1950       if (!SimplifiedPtrOp.hasValue())
1951         return true;
1952       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1953 
1954       // A memory access through a pointer is considered UB
1955       // only if the pointer has constant null value.
1956       // TODO: Expand it to not only check constant values.
1957       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1958         AssumedNoUBInsts.insert(&I);
1959         return true;
1960       }
1961       const Type *PtrTy = PtrOpVal->getType();
1962 
1963       // Because we only consider instructions inside functions,
1964       // assume that a parent function exists.
1965       const Function *F = I.getFunction();
1966 
1967       // A memory access using constant null pointer is only considered UB
1968       // if null pointer is _not_ defined for the target platform.
1969       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1970         AssumedNoUBInsts.insert(&I);
1971       else
1972         KnownUBInsts.insert(&I);
1973       return true;
1974     };
1975 
1976     auto InspectBrInstForUB = [&](Instruction &I) {
1977       // A conditional branch instruction is considered UB if it has `undef`
1978       // condition.
1979 
1980       // Skip instructions that are already saved.
1981       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1982         return true;
1983 
1984       // We know we have a branch instruction.
1985       auto BrInst = cast<BranchInst>(&I);
1986 
1987       // Unconditional branches are never considered UB.
1988       if (BrInst->isUnconditional())
1989         return true;
1990 
1991       // Either we stopped and the appropriate action was taken,
1992       // or we got back a simplified value to continue.
1993       Optional<Value *> SimplifiedCond =
1994           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1995       if (!SimplifiedCond.hasValue())
1996         return true;
1997       AssumedNoUBInsts.insert(&I);
1998       return true;
1999     };
2000 
2001     auto InspectCallSiteForUB = [&](Instruction &I) {
2002       // Check whether a callsite always cause UB or not
2003 
2004       // Skip instructions that are already saved.
2005       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2006         return true;
2007 
2008       // Check nonnull and noundef argument attribute violation for each
2009       // callsite.
2010       CallBase &CB = cast<CallBase>(I);
2011       Function *Callee = CB.getCalledFunction();
2012       if (!Callee)
2013         return true;
2014       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2015         // If current argument is known to be simplified to null pointer and the
2016         // corresponding argument position is known to have nonnull attribute,
2017         // the argument is poison. Furthermore, if the argument is poison and
2018         // the position is known to have noundef attriubte, this callsite is
2019         // considered UB.
2020         if (idx >= Callee->arg_size())
2021           break;
2022         Value *ArgVal = CB.getArgOperand(idx);
2023         if (!ArgVal)
2024           continue;
2025         // Here, we handle three cases.
2026         //   (1) Not having a value means it is dead. (we can replace the value
2027         //       with undef)
2028         //   (2) Simplified to undef. The argument violate noundef attriubte.
2029         //   (3) Simplified to null pointer where known to be nonnull.
2030         //       The argument is a poison value and violate noundef attribute.
2031         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2032         auto &NoUndefAA = A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP,
2033                                                 /* TrackDependence */ false);
2034         if (!NoUndefAA.isKnownNoUndef())
2035           continue;
2036         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2037             *this, IRPosition::value(*ArgVal), /* TrackDependence */ false);
2038         if (!ValueSimplifyAA.isKnown())
2039           continue;
2040         Optional<Value *> SimplifiedVal =
2041             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2042         if (!SimplifiedVal.hasValue() ||
2043             isa<UndefValue>(*SimplifiedVal.getValue())) {
2044           KnownUBInsts.insert(&I);
2045           continue;
2046         }
2047         if (!ArgVal->getType()->isPointerTy() ||
2048             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2049           continue;
2050         auto &NonNullAA = A.getAAFor<AANonNull>(*this, CalleeArgumentIRP,
2051                                                 /* TrackDependence */ false);
2052         if (NonNullAA.isKnownNonNull())
2053           KnownUBInsts.insert(&I);
2054       }
2055       return true;
2056     };
2057 
2058     auto InspectReturnInstForUB =
2059         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2060           // Check if a return instruction always cause UB or not
2061           // Note: It is guaranteed that the returned position of the anchor
2062           //       scope has noundef attribute when this is called.
2063 
2064           // When the returned position has noundef attriubte, UB occur in the
2065           // following cases.
2066           //   (1) Returned value is known to be undef.
2067           //   (2) The value is known to be a null pointer and the returned
2068           //       position has nonnull attribute (because the returned value is
2069           //       poison).
2070           // Note: This callback is not called for a dead returned value because
2071           //       such values are ignored in
2072           //       checkForAllReturnedValuesAndReturnedInsts.
2073           bool FoundUB = false;
2074           if (isa<UndefValue>(V)) {
2075             FoundUB = true;
2076           } else {
2077             if (isa<ConstantPointerNull>(V)) {
2078               auto &NonNullAA = A.getAAFor<AANonNull>(
2079                   *this, IRPosition::returned(*getAnchorScope()),
2080                   /* TrackDependence */ false);
2081               if (NonNullAA.isKnownNonNull())
2082                 FoundUB = true;
2083             }
2084           }
2085 
2086           if (FoundUB)
2087             for (ReturnInst *RI : RetInsts)
2088               KnownUBInsts.insert(RI);
2089           return true;
2090         };
2091 
2092     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2093                               {Instruction::Load, Instruction::Store,
2094                                Instruction::AtomicCmpXchg,
2095                                Instruction::AtomicRMW},
2096                               /* CheckBBLivenessOnly */ true);
2097     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2098                               /* CheckBBLivenessOnly */ true);
2099     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2100 
2101     // If the returned position of the anchor scope has noundef attriubte, check
2102     // all returned instructions.
2103     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2104       auto &RetPosNoUndefAA =
2105           A.getAAFor<AANoUndef>(*this, IRPosition::returned(*getAnchorScope()),
2106                                 /* TrackDependence */ false);
2107       if (RetPosNoUndefAA.isKnownNoUndef())
2108         A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2109                                                   *this);
2110     }
2111 
2112     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2113         UBPrevSize != KnownUBInsts.size())
2114       return ChangeStatus::CHANGED;
2115     return ChangeStatus::UNCHANGED;
2116   }
2117 
2118   bool isKnownToCauseUB(Instruction *I) const override {
2119     return KnownUBInsts.count(I);
2120   }
2121 
2122   bool isAssumedToCauseUB(Instruction *I) const override {
2123     // In simple words, if an instruction is not in the assumed to _not_
2124     // cause UB, then it is assumed UB (that includes those
2125     // in the KnownUBInsts set). The rest is boilerplate
2126     // is to ensure that it is one of the instructions we test
2127     // for UB.
2128 
2129     switch (I->getOpcode()) {
2130     case Instruction::Load:
2131     case Instruction::Store:
2132     case Instruction::AtomicCmpXchg:
2133     case Instruction::AtomicRMW:
2134       return !AssumedNoUBInsts.count(I);
2135     case Instruction::Br: {
2136       auto BrInst = cast<BranchInst>(I);
2137       if (BrInst->isUnconditional())
2138         return false;
2139       return !AssumedNoUBInsts.count(I);
2140     } break;
2141     default:
2142       return false;
2143     }
2144     return false;
2145   }
2146 
2147   ChangeStatus manifest(Attributor &A) override {
2148     if (KnownUBInsts.empty())
2149       return ChangeStatus::UNCHANGED;
2150     for (Instruction *I : KnownUBInsts)
2151       A.changeToUnreachableAfterManifest(I);
2152     return ChangeStatus::CHANGED;
2153   }
2154 
2155   /// See AbstractAttribute::getAsStr()
2156   const std::string getAsStr() const override {
2157     return getAssumed() ? "undefined-behavior" : "no-ub";
2158   }
2159 
2160   /// Note: The correctness of this analysis depends on the fact that the
2161   /// following 2 sets will stop changing after some point.
2162   /// "Change" here means that their size changes.
2163   /// The size of each set is monotonically increasing
2164   /// (we only add items to them) and it is upper bounded by the number of
2165   /// instructions in the processed function (we can never save more
2166   /// elements in either set than this number). Hence, at some point,
2167   /// they will stop increasing.
2168   /// Consequently, at some point, both sets will have stopped
2169   /// changing, effectively making the analysis reach a fixpoint.
2170 
2171   /// Note: These 2 sets are disjoint and an instruction can be considered
2172   /// one of 3 things:
2173   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2174   ///    the KnownUBInsts set.
2175   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2176   ///    has a reason to assume it).
2177   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2178   ///    could not find a reason to assume or prove that it can cause UB,
2179   ///    hence it assumes it doesn't. We have a set for these instructions
2180   ///    so that we don't reprocess them in every update.
2181   ///    Note however that instructions in this set may cause UB.
2182 
2183 protected:
2184   /// A set of all live instructions _known_ to cause UB.
2185   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2186 
2187 private:
2188   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2189   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2190 
2191   // Should be called on updates in which if we're processing an instruction
2192   // \p I that depends on a value \p V, one of the following has to happen:
2193   // - If the value is assumed, then stop.
2194   // - If the value is known but undef, then consider it UB.
2195   // - Otherwise, do specific processing with the simplified value.
2196   // We return None in the first 2 cases to signify that an appropriate
2197   // action was taken and the caller should stop.
2198   // Otherwise, we return the simplified value that the caller should
2199   // use for specific processing.
2200   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2201                                          Instruction *I) {
2202     const auto &ValueSimplifyAA =
2203         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2204     Optional<Value *> SimplifiedV =
2205         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2206     if (!ValueSimplifyAA.isKnown()) {
2207       // Don't depend on assumed values.
2208       return llvm::None;
2209     }
2210     if (!SimplifiedV.hasValue()) {
2211       // If it is known (which we tested above) but it doesn't have a value,
2212       // then we can assume `undef` and hence the instruction is UB.
2213       KnownUBInsts.insert(I);
2214       return llvm::None;
2215     }
2216     Value *Val = SimplifiedV.getValue();
2217     if (isa<UndefValue>(Val)) {
2218       KnownUBInsts.insert(I);
2219       return llvm::None;
2220     }
2221     return Val;
2222   }
2223 };
2224 
2225 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2226   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2227       : AAUndefinedBehaviorImpl(IRP, A) {}
2228 
2229   /// See AbstractAttribute::trackStatistics()
2230   void trackStatistics() const override {
2231     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2232                "Number of instructions known to have UB");
2233     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2234         KnownUBInsts.size();
2235   }
2236 };
2237 
2238 /// ------------------------ Will-Return Attributes ----------------------------
2239 
2240 // Helper function that checks whether a function has any cycle which we don't
2241 // know if it is bounded or not.
2242 // Loops with maximum trip count are considered bounded, any other cycle not.
2243 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2244   ScalarEvolution *SE =
2245       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2246   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2247   // If either SCEV or LoopInfo is not available for the function then we assume
2248   // any cycle to be unbounded cycle.
2249   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2250   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2251   if (!SE || !LI) {
2252     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2253       if (SCCI.hasCycle())
2254         return true;
2255     return false;
2256   }
2257 
2258   // If there's irreducible control, the function may contain non-loop cycles.
2259   if (mayContainIrreducibleControl(F, LI))
2260     return true;
2261 
2262   // Any loop that does not have a max trip count is considered unbounded cycle.
2263   for (auto *L : LI->getLoopsInPreorder()) {
2264     if (!SE->getSmallConstantMaxTripCount(L))
2265       return true;
2266   }
2267   return false;
2268 }
2269 
2270 struct AAWillReturnImpl : public AAWillReturn {
2271   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2272       : AAWillReturn(IRP, A) {}
2273 
2274   /// See AbstractAttribute::initialize(...).
2275   void initialize(Attributor &A) override {
2276     AAWillReturn::initialize(A);
2277 
2278     Function *F = getAnchorScope();
2279     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2280       indicatePessimisticFixpoint();
2281   }
2282 
2283   /// See AbstractAttribute::updateImpl(...).
2284   ChangeStatus updateImpl(Attributor &A) override {
2285     auto CheckForWillReturn = [&](Instruction &I) {
2286       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2287       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2288       if (WillReturnAA.isKnownWillReturn())
2289         return true;
2290       if (!WillReturnAA.isAssumedWillReturn())
2291         return false;
2292       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2293       return NoRecurseAA.isAssumedNoRecurse();
2294     };
2295 
2296     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2297       return indicatePessimisticFixpoint();
2298 
2299     return ChangeStatus::UNCHANGED;
2300   }
2301 
2302   /// See AbstractAttribute::getAsStr()
2303   const std::string getAsStr() const override {
2304     return getAssumed() ? "willreturn" : "may-noreturn";
2305   }
2306 };
2307 
2308 struct AAWillReturnFunction final : AAWillReturnImpl {
2309   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2310       : AAWillReturnImpl(IRP, A) {}
2311 
2312   /// See AbstractAttribute::trackStatistics()
2313   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2314 };
2315 
2316 /// WillReturn attribute deduction for a call sites.
2317 struct AAWillReturnCallSite final : AAWillReturnImpl {
2318   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2319       : AAWillReturnImpl(IRP, A) {}
2320 
2321   /// See AbstractAttribute::initialize(...).
2322   void initialize(Attributor &A) override {
2323     AAWillReturn::initialize(A);
2324     Function *F = getAssociatedFunction();
2325     if (!F || !A.isFunctionIPOAmendable(*F))
2326       indicatePessimisticFixpoint();
2327   }
2328 
2329   /// See AbstractAttribute::updateImpl(...).
2330   ChangeStatus updateImpl(Attributor &A) override {
2331     // TODO: Once we have call site specific value information we can provide
2332     //       call site specific liveness information and then it makes
2333     //       sense to specialize attributes for call sites arguments instead of
2334     //       redirecting requests to the callee argument.
2335     Function *F = getAssociatedFunction();
2336     const IRPosition &FnPos = IRPosition::function(*F);
2337     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2338     return clampStateAndIndicateChange(getState(), FnAA.getState());
2339   }
2340 
2341   /// See AbstractAttribute::trackStatistics()
2342   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2343 };
2344 
2345 /// -------------------AAReachability Attribute--------------------------
2346 
2347 struct AAReachabilityImpl : AAReachability {
2348   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2349       : AAReachability(IRP, A) {}
2350 
2351   const std::string getAsStr() const override {
2352     // TODO: Return the number of reachable queries.
2353     return "reachable";
2354   }
2355 
2356   /// See AbstractAttribute::initialize(...).
2357   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2358 
2359   /// See AbstractAttribute::updateImpl(...).
2360   ChangeStatus updateImpl(Attributor &A) override {
2361     return indicatePessimisticFixpoint();
2362   }
2363 };
2364 
2365 struct AAReachabilityFunction final : public AAReachabilityImpl {
2366   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2367       : AAReachabilityImpl(IRP, A) {}
2368 
2369   /// See AbstractAttribute::trackStatistics()
2370   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2371 };
2372 
2373 /// ------------------------ NoAlias Argument Attribute ------------------------
2374 
2375 struct AANoAliasImpl : AANoAlias {
2376   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2377     assert(getAssociatedType()->isPointerTy() &&
2378            "Noalias is a pointer attribute");
2379   }
2380 
2381   const std::string getAsStr() const override {
2382     return getAssumed() ? "noalias" : "may-alias";
2383   }
2384 };
2385 
2386 /// NoAlias attribute for a floating value.
2387 struct AANoAliasFloating final : AANoAliasImpl {
2388   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2389       : AANoAliasImpl(IRP, A) {}
2390 
2391   /// See AbstractAttribute::initialize(...).
2392   void initialize(Attributor &A) override {
2393     AANoAliasImpl::initialize(A);
2394     Value *Val = &getAssociatedValue();
2395     do {
2396       CastInst *CI = dyn_cast<CastInst>(Val);
2397       if (!CI)
2398         break;
2399       Value *Base = CI->getOperand(0);
2400       if (!Base->hasOneUse())
2401         break;
2402       Val = Base;
2403     } while (true);
2404 
2405     if (!Val->getType()->isPointerTy()) {
2406       indicatePessimisticFixpoint();
2407       return;
2408     }
2409 
2410     if (isa<AllocaInst>(Val))
2411       indicateOptimisticFixpoint();
2412     else if (isa<ConstantPointerNull>(Val) &&
2413              !NullPointerIsDefined(getAnchorScope(),
2414                                    Val->getType()->getPointerAddressSpace()))
2415       indicateOptimisticFixpoint();
2416     else if (Val != &getAssociatedValue()) {
2417       const auto &ValNoAliasAA =
2418           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2419       if (ValNoAliasAA.isKnownNoAlias())
2420         indicateOptimisticFixpoint();
2421     }
2422   }
2423 
2424   /// See AbstractAttribute::updateImpl(...).
2425   ChangeStatus updateImpl(Attributor &A) override {
2426     // TODO: Implement this.
2427     return indicatePessimisticFixpoint();
2428   }
2429 
2430   /// See AbstractAttribute::trackStatistics()
2431   void trackStatistics() const override {
2432     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2433   }
2434 };
2435 
2436 /// NoAlias attribute for an argument.
2437 struct AANoAliasArgument final
2438     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2439   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2440   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2441 
2442   /// See AbstractAttribute::initialize(...).
2443   void initialize(Attributor &A) override {
2444     Base::initialize(A);
2445     // See callsite argument attribute and callee argument attribute.
2446     if (hasAttr({Attribute::ByVal}))
2447       indicateOptimisticFixpoint();
2448   }
2449 
2450   /// See AbstractAttribute::update(...).
2451   ChangeStatus updateImpl(Attributor &A) override {
2452     // We have to make sure no-alias on the argument does not break
2453     // synchronization when this is a callback argument, see also [1] below.
2454     // If synchronization cannot be affected, we delegate to the base updateImpl
2455     // function, otherwise we give up for now.
2456 
2457     // If the function is no-sync, no-alias cannot break synchronization.
2458     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2459         *this, IRPosition::function_scope(getIRPosition()));
2460     if (NoSyncAA.isAssumedNoSync())
2461       return Base::updateImpl(A);
2462 
2463     // If the argument is read-only, no-alias cannot break synchronization.
2464     const auto &MemBehaviorAA =
2465         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2466     if (MemBehaviorAA.isAssumedReadOnly())
2467       return Base::updateImpl(A);
2468 
2469     // If the argument is never passed through callbacks, no-alias cannot break
2470     // synchronization.
2471     bool AllCallSitesKnown;
2472     if (A.checkForAllCallSites(
2473             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2474             true, AllCallSitesKnown))
2475       return Base::updateImpl(A);
2476 
2477     // TODO: add no-alias but make sure it doesn't break synchronization by
2478     // introducing fake uses. See:
2479     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2480     //     International Workshop on OpenMP 2018,
2481     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2482 
2483     return indicatePessimisticFixpoint();
2484   }
2485 
2486   /// See AbstractAttribute::trackStatistics()
2487   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2488 };
2489 
2490 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2491   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2492       : AANoAliasImpl(IRP, A) {}
2493 
2494   /// See AbstractAttribute::initialize(...).
2495   void initialize(Attributor &A) override {
2496     // See callsite argument attribute and callee argument attribute.
2497     const auto &CB = cast<CallBase>(getAnchorValue());
2498     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2499       indicateOptimisticFixpoint();
2500     Value &Val = getAssociatedValue();
2501     if (isa<ConstantPointerNull>(Val) &&
2502         !NullPointerIsDefined(getAnchorScope(),
2503                               Val.getType()->getPointerAddressSpace()))
2504       indicateOptimisticFixpoint();
2505   }
2506 
2507   /// Determine if the underlying value may alias with the call site argument
2508   /// \p OtherArgNo of \p ICS (= the underlying call site).
2509   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2510                             const AAMemoryBehavior &MemBehaviorAA,
2511                             const CallBase &CB, unsigned OtherArgNo) {
2512     // We do not need to worry about aliasing with the underlying IRP.
2513     if (this->getCalleeArgNo() == (int)OtherArgNo)
2514       return false;
2515 
2516     // If it is not a pointer or pointer vector we do not alias.
2517     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2518     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2519       return false;
2520 
2521     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2522         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2523         /* TrackDependence */ false);
2524 
2525     // If the argument is readnone, there is no read-write aliasing.
2526     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2527       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2528       return false;
2529     }
2530 
2531     // If the argument is readonly and the underlying value is readonly, there
2532     // is no read-write aliasing.
2533     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2534     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2535       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2536       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2537       return false;
2538     }
2539 
2540     // We have to utilize actual alias analysis queries so we need the object.
2541     if (!AAR)
2542       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2543 
2544     // Try to rule it out at the call site.
2545     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2546     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2547                          "callsite arguments: "
2548                       << getAssociatedValue() << " " << *ArgOp << " => "
2549                       << (IsAliasing ? "" : "no-") << "alias \n");
2550 
2551     return IsAliasing;
2552   }
2553 
2554   bool
2555   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2556                                          const AAMemoryBehavior &MemBehaviorAA,
2557                                          const AANoAlias &NoAliasAA) {
2558     // We can deduce "noalias" if the following conditions hold.
2559     // (i)   Associated value is assumed to be noalias in the definition.
2560     // (ii)  Associated value is assumed to be no-capture in all the uses
2561     //       possibly executed before this callsite.
2562     // (iii) There is no other pointer argument which could alias with the
2563     //       value.
2564 
2565     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2566     if (!AssociatedValueIsNoAliasAtDef) {
2567       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2568                         << " is not no-alias at the definition\n");
2569       return false;
2570     }
2571 
2572     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2573 
2574     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2575     auto &NoCaptureAA =
2576         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2577     // Check whether the value is captured in the scope using AANoCapture.
2578     //      Look at CFG and check only uses possibly executed before this
2579     //      callsite.
2580     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2581       Instruction *UserI = cast<Instruction>(U.getUser());
2582 
2583       // If user if curr instr and only use.
2584       if (UserI == getCtxI() && UserI->hasOneUse())
2585         return true;
2586 
2587       const Function *ScopeFn = VIRP.getAnchorScope();
2588       if (ScopeFn) {
2589         const auto &ReachabilityAA =
2590             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2591 
2592         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2593           return true;
2594 
2595         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2596           if (CB->isArgOperand(&U)) {
2597 
2598             unsigned ArgNo = CB->getArgOperandNo(&U);
2599 
2600             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2601                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2602 
2603             if (NoCaptureAA.isAssumedNoCapture())
2604               return true;
2605           }
2606         }
2607       }
2608 
2609       // For cases which can potentially have more users
2610       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2611           isa<SelectInst>(U)) {
2612         Follow = true;
2613         return true;
2614       }
2615 
2616       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2617       return false;
2618     };
2619 
2620     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2621       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2622         LLVM_DEBUG(
2623             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2624                    << " cannot be noalias as it is potentially captured\n");
2625         return false;
2626       }
2627     }
2628     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2629 
2630     // Check there is no other pointer argument which could alias with the
2631     // value passed at this call site.
2632     // TODO: AbstractCallSite
2633     const auto &CB = cast<CallBase>(getAnchorValue());
2634     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2635          OtherArgNo++)
2636       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2637         return false;
2638 
2639     return true;
2640   }
2641 
2642   /// See AbstractAttribute::updateImpl(...).
2643   ChangeStatus updateImpl(Attributor &A) override {
2644     // If the argument is readnone we are done as there are no accesses via the
2645     // argument.
2646     auto &MemBehaviorAA =
2647         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2648                                      /* TrackDependence */ false);
2649     if (MemBehaviorAA.isAssumedReadNone()) {
2650       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2651       return ChangeStatus::UNCHANGED;
2652     }
2653 
2654     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2655     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2656                                                   /* TrackDependence */ false);
2657 
2658     AAResults *AAR = nullptr;
2659     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2660                                                NoAliasAA)) {
2661       LLVM_DEBUG(
2662           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2663       return ChangeStatus::UNCHANGED;
2664     }
2665 
2666     return indicatePessimisticFixpoint();
2667   }
2668 
2669   /// See AbstractAttribute::trackStatistics()
2670   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2671 };
2672 
2673 /// NoAlias attribute for function return value.
2674 struct AANoAliasReturned final : AANoAliasImpl {
2675   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2676       : AANoAliasImpl(IRP, A) {}
2677 
2678   /// See AbstractAttribute::initialize(...).
2679   void initialize(Attributor &A) override {
2680     AANoAliasImpl::initialize(A);
2681     Function *F = getAssociatedFunction();
2682     if (!F || F->isDeclaration())
2683       indicatePessimisticFixpoint();
2684   }
2685 
2686   /// See AbstractAttribute::updateImpl(...).
2687   virtual ChangeStatus updateImpl(Attributor &A) override {
2688 
2689     auto CheckReturnValue = [&](Value &RV) -> bool {
2690       if (Constant *C = dyn_cast<Constant>(&RV))
2691         if (C->isNullValue() || isa<UndefValue>(C))
2692           return true;
2693 
2694       /// For now, we can only deduce noalias if we have call sites.
2695       /// FIXME: add more support.
2696       if (!isa<CallBase>(&RV))
2697         return false;
2698 
2699       const IRPosition &RVPos = IRPosition::value(RV);
2700       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2701       if (!NoAliasAA.isAssumedNoAlias())
2702         return false;
2703 
2704       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2705       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2706     };
2707 
2708     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2709       return indicatePessimisticFixpoint();
2710 
2711     return ChangeStatus::UNCHANGED;
2712   }
2713 
2714   /// See AbstractAttribute::trackStatistics()
2715   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2716 };
2717 
2718 /// NoAlias attribute deduction for a call site return value.
2719 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2720   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2721       : AANoAliasImpl(IRP, A) {}
2722 
2723   /// See AbstractAttribute::initialize(...).
2724   void initialize(Attributor &A) override {
2725     AANoAliasImpl::initialize(A);
2726     Function *F = getAssociatedFunction();
2727     if (!F || F->isDeclaration())
2728       indicatePessimisticFixpoint();
2729   }
2730 
2731   /// See AbstractAttribute::updateImpl(...).
2732   ChangeStatus updateImpl(Attributor &A) override {
2733     // TODO: Once we have call site specific value information we can provide
2734     //       call site specific liveness information and then it makes
2735     //       sense to specialize attributes for call sites arguments instead of
2736     //       redirecting requests to the callee argument.
2737     Function *F = getAssociatedFunction();
2738     const IRPosition &FnPos = IRPosition::returned(*F);
2739     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2740     return clampStateAndIndicateChange(getState(), FnAA.getState());
2741   }
2742 
2743   /// See AbstractAttribute::trackStatistics()
2744   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2745 };
2746 
2747 /// -------------------AAIsDead Function Attribute-----------------------
2748 
2749 struct AAIsDeadValueImpl : public AAIsDead {
2750   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2751 
2752   /// See AAIsDead::isAssumedDead().
2753   bool isAssumedDead() const override { return getAssumed(); }
2754 
2755   /// See AAIsDead::isKnownDead().
2756   bool isKnownDead() const override { return getKnown(); }
2757 
2758   /// See AAIsDead::isAssumedDead(BasicBlock *).
2759   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2760 
2761   /// See AAIsDead::isKnownDead(BasicBlock *).
2762   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2763 
2764   /// See AAIsDead::isAssumedDead(Instruction *I).
2765   bool isAssumedDead(const Instruction *I) const override {
2766     return I == getCtxI() && isAssumedDead();
2767   }
2768 
2769   /// See AAIsDead::isKnownDead(Instruction *I).
2770   bool isKnownDead(const Instruction *I) const override {
2771     return isAssumedDead(I) && getKnown();
2772   }
2773 
2774   /// See AbstractAttribute::getAsStr().
2775   const std::string getAsStr() const override {
2776     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2777   }
2778 
2779   /// Check if all uses are assumed dead.
2780   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2781     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2782     // Explicitly set the dependence class to required because we want a long
2783     // chain of N dependent instructions to be considered live as soon as one is
2784     // without going through N update cycles. This is not required for
2785     // correctness.
2786     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2787   }
2788 
2789   /// Determine if \p I is assumed to be side-effect free.
2790   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2791     if (!I || wouldInstructionBeTriviallyDead(I))
2792       return true;
2793 
2794     auto *CB = dyn_cast<CallBase>(I);
2795     if (!CB || isa<IntrinsicInst>(CB))
2796       return false;
2797 
2798     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2799     const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>(
2800         *this, CallIRP, /* TrackDependence */ false);
2801     if (!NoUnwindAA.isAssumedNoUnwind())
2802       return false;
2803     if (!NoUnwindAA.isKnownNoUnwind())
2804       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2805 
2806     const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>(
2807         *this, CallIRP, /* TrackDependence */ false);
2808     if (MemBehaviorAA.isAssumedReadOnly()) {
2809       if (!MemBehaviorAA.isKnownReadOnly())
2810         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2811       return true;
2812     }
2813     return false;
2814   }
2815 };
2816 
2817 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2818   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2819       : AAIsDeadValueImpl(IRP, A) {}
2820 
2821   /// See AbstractAttribute::initialize(...).
2822   void initialize(Attributor &A) override {
2823     if (isa<UndefValue>(getAssociatedValue())) {
2824       indicatePessimisticFixpoint();
2825       return;
2826     }
2827 
2828     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2829     if (!isAssumedSideEffectFree(A, I))
2830       indicatePessimisticFixpoint();
2831   }
2832 
2833   /// See AbstractAttribute::updateImpl(...).
2834   ChangeStatus updateImpl(Attributor &A) override {
2835     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2836     if (!isAssumedSideEffectFree(A, I))
2837       return indicatePessimisticFixpoint();
2838 
2839     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2840       return indicatePessimisticFixpoint();
2841     return ChangeStatus::UNCHANGED;
2842   }
2843 
2844   /// See AbstractAttribute::manifest(...).
2845   ChangeStatus manifest(Attributor &A) override {
2846     Value &V = getAssociatedValue();
2847     if (auto *I = dyn_cast<Instruction>(&V)) {
2848       // If we get here we basically know the users are all dead. We check if
2849       // isAssumedSideEffectFree returns true here again because it might not be
2850       // the case and only the users are dead but the instruction (=call) is
2851       // still needed.
2852       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2853         A.deleteAfterManifest(*I);
2854         return ChangeStatus::CHANGED;
2855       }
2856     }
2857     if (V.use_empty())
2858       return ChangeStatus::UNCHANGED;
2859 
2860     bool UsedAssumedInformation = false;
2861     Optional<Constant *> C =
2862         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2863     if (C.hasValue() && C.getValue())
2864       return ChangeStatus::UNCHANGED;
2865 
2866     // Replace the value with undef as it is dead but keep droppable uses around
2867     // as they provide information we don't want to give up on just yet.
2868     UndefValue &UV = *UndefValue::get(V.getType());
2869     bool AnyChange =
2870         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2871     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2872   }
2873 
2874   /// See AbstractAttribute::trackStatistics()
2875   void trackStatistics() const override {
2876     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2877   }
2878 };
2879 
2880 struct AAIsDeadArgument : public AAIsDeadFloating {
2881   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2882       : AAIsDeadFloating(IRP, A) {}
2883 
2884   /// See AbstractAttribute::initialize(...).
2885   void initialize(Attributor &A) override {
2886     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2887       indicatePessimisticFixpoint();
2888   }
2889 
2890   /// See AbstractAttribute::manifest(...).
2891   ChangeStatus manifest(Attributor &A) override {
2892     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2893     Argument &Arg = *getAssociatedArgument();
2894     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2895       if (A.registerFunctionSignatureRewrite(
2896               Arg, /* ReplacementTypes */ {},
2897               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2898               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2899         Arg.dropDroppableUses();
2900         return ChangeStatus::CHANGED;
2901       }
2902     return Changed;
2903   }
2904 
2905   /// See AbstractAttribute::trackStatistics()
2906   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2907 };
2908 
2909 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2910   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2911       : AAIsDeadValueImpl(IRP, A) {}
2912 
2913   /// See AbstractAttribute::initialize(...).
2914   void initialize(Attributor &A) override {
2915     if (isa<UndefValue>(getAssociatedValue()))
2916       indicatePessimisticFixpoint();
2917   }
2918 
2919   /// See AbstractAttribute::updateImpl(...).
2920   ChangeStatus updateImpl(Attributor &A) override {
2921     // TODO: Once we have call site specific value information we can provide
2922     //       call site specific liveness information and then it makes
2923     //       sense to specialize attributes for call sites arguments instead of
2924     //       redirecting requests to the callee argument.
2925     Argument *Arg = getAssociatedArgument();
2926     if (!Arg)
2927       return indicatePessimisticFixpoint();
2928     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2929     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2930     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2931   }
2932 
2933   /// See AbstractAttribute::manifest(...).
2934   ChangeStatus manifest(Attributor &A) override {
2935     CallBase &CB = cast<CallBase>(getAnchorValue());
2936     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2937     assert(!isa<UndefValue>(U.get()) &&
2938            "Expected undef values to be filtered out!");
2939     UndefValue &UV = *UndefValue::get(U->getType());
2940     if (A.changeUseAfterManifest(U, UV))
2941       return ChangeStatus::CHANGED;
2942     return ChangeStatus::UNCHANGED;
2943   }
2944 
2945   /// See AbstractAttribute::trackStatistics()
2946   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2947 };
2948 
2949 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2950   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2951       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2952 
2953   /// See AAIsDead::isAssumedDead().
2954   bool isAssumedDead() const override {
2955     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2956   }
2957 
2958   /// See AbstractAttribute::initialize(...).
2959   void initialize(Attributor &A) override {
2960     if (isa<UndefValue>(getAssociatedValue())) {
2961       indicatePessimisticFixpoint();
2962       return;
2963     }
2964 
2965     // We track this separately as a secondary state.
2966     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2967   }
2968 
2969   /// See AbstractAttribute::updateImpl(...).
2970   ChangeStatus updateImpl(Attributor &A) override {
2971     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2972     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2973       IsAssumedSideEffectFree = false;
2974       Changed = ChangeStatus::CHANGED;
2975     }
2976 
2977     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2978       return indicatePessimisticFixpoint();
2979     return Changed;
2980   }
2981 
2982   /// See AbstractAttribute::trackStatistics()
2983   void trackStatistics() const override {
2984     if (IsAssumedSideEffectFree)
2985       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2986     else
2987       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2988   }
2989 
2990   /// See AbstractAttribute::getAsStr().
2991   const std::string getAsStr() const override {
2992     return isAssumedDead()
2993                ? "assumed-dead"
2994                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2995   }
2996 
2997 private:
2998   bool IsAssumedSideEffectFree;
2999 };
3000 
3001 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3002   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3003       : AAIsDeadValueImpl(IRP, A) {}
3004 
3005   /// See AbstractAttribute::updateImpl(...).
3006   ChangeStatus updateImpl(Attributor &A) override {
3007 
3008     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3009                               {Instruction::Ret});
3010 
3011     auto PredForCallSite = [&](AbstractCallSite ACS) {
3012       if (ACS.isCallbackCall() || !ACS.getInstruction())
3013         return false;
3014       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3015     };
3016 
3017     bool AllCallSitesKnown;
3018     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3019                                 AllCallSitesKnown))
3020       return indicatePessimisticFixpoint();
3021 
3022     return ChangeStatus::UNCHANGED;
3023   }
3024 
3025   /// See AbstractAttribute::manifest(...).
3026   ChangeStatus manifest(Attributor &A) override {
3027     // TODO: Rewrite the signature to return void?
3028     bool AnyChange = false;
3029     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3030     auto RetInstPred = [&](Instruction &I) {
3031       ReturnInst &RI = cast<ReturnInst>(I);
3032       if (!isa<UndefValue>(RI.getReturnValue()))
3033         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3034       return true;
3035     };
3036     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3037     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3038   }
3039 
3040   /// See AbstractAttribute::trackStatistics()
3041   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3042 };
3043 
3044 struct AAIsDeadFunction : public AAIsDead {
3045   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3046 
3047   /// See AbstractAttribute::initialize(...).
3048   void initialize(Attributor &A) override {
3049     const Function *F = getAnchorScope();
3050     if (F && !F->isDeclaration()) {
3051       // We only want to compute liveness once. If the function is not part of
3052       // the SCC, skip it.
3053       if (A.isRunOn(*const_cast<Function *>(F))) {
3054         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3055         assumeLive(A, F->getEntryBlock());
3056       } else {
3057         indicatePessimisticFixpoint();
3058       }
3059     }
3060   }
3061 
3062   /// See AbstractAttribute::getAsStr().
3063   const std::string getAsStr() const override {
3064     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3065            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3066            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3067            std::to_string(KnownDeadEnds.size()) + "]";
3068   }
3069 
3070   /// See AbstractAttribute::manifest(...).
3071   ChangeStatus manifest(Attributor &A) override {
3072     assert(getState().isValidState() &&
3073            "Attempted to manifest an invalid state!");
3074 
3075     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3076     Function &F = *getAnchorScope();
3077 
3078     if (AssumedLiveBlocks.empty()) {
3079       A.deleteAfterManifest(F);
3080       return ChangeStatus::CHANGED;
3081     }
3082 
3083     // Flag to determine if we can change an invoke to a call assuming the
3084     // callee is nounwind. This is not possible if the personality of the
3085     // function allows to catch asynchronous exceptions.
3086     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3087 
3088     KnownDeadEnds.set_union(ToBeExploredFrom);
3089     for (const Instruction *DeadEndI : KnownDeadEnds) {
3090       auto *CB = dyn_cast<CallBase>(DeadEndI);
3091       if (!CB)
3092         continue;
3093       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3094           *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true,
3095           DepClassTy::OPTIONAL);
3096       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3097       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3098         continue;
3099 
3100       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3101         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3102       else
3103         A.changeToUnreachableAfterManifest(
3104             const_cast<Instruction *>(DeadEndI->getNextNode()));
3105       HasChanged = ChangeStatus::CHANGED;
3106     }
3107 
3108     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3109     for (BasicBlock &BB : F)
3110       if (!AssumedLiveBlocks.count(&BB)) {
3111         A.deleteAfterManifest(BB);
3112         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3113       }
3114 
3115     return HasChanged;
3116   }
3117 
3118   /// See AbstractAttribute::updateImpl(...).
3119   ChangeStatus updateImpl(Attributor &A) override;
3120 
3121   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3122     return !AssumedLiveEdges.count(std::make_pair(From, To));
3123   }
3124 
3125   /// See AbstractAttribute::trackStatistics()
3126   void trackStatistics() const override {}
3127 
3128   /// Returns true if the function is assumed dead.
3129   bool isAssumedDead() const override { return false; }
3130 
3131   /// See AAIsDead::isKnownDead().
3132   bool isKnownDead() const override { return false; }
3133 
3134   /// See AAIsDead::isAssumedDead(BasicBlock *).
3135   bool isAssumedDead(const BasicBlock *BB) const override {
3136     assert(BB->getParent() == getAnchorScope() &&
3137            "BB must be in the same anchor scope function.");
3138 
3139     if (!getAssumed())
3140       return false;
3141     return !AssumedLiveBlocks.count(BB);
3142   }
3143 
3144   /// See AAIsDead::isKnownDead(BasicBlock *).
3145   bool isKnownDead(const BasicBlock *BB) const override {
3146     return getKnown() && isAssumedDead(BB);
3147   }
3148 
3149   /// See AAIsDead::isAssumed(Instruction *I).
3150   bool isAssumedDead(const Instruction *I) const override {
3151     assert(I->getParent()->getParent() == getAnchorScope() &&
3152            "Instruction must be in the same anchor scope function.");
3153 
3154     if (!getAssumed())
3155       return false;
3156 
3157     // If it is not in AssumedLiveBlocks then it for sure dead.
3158     // Otherwise, it can still be after noreturn call in a live block.
3159     if (!AssumedLiveBlocks.count(I->getParent()))
3160       return true;
3161 
3162     // If it is not after a liveness barrier it is live.
3163     const Instruction *PrevI = I->getPrevNode();
3164     while (PrevI) {
3165       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3166         return true;
3167       PrevI = PrevI->getPrevNode();
3168     }
3169     return false;
3170   }
3171 
3172   /// See AAIsDead::isKnownDead(Instruction *I).
3173   bool isKnownDead(const Instruction *I) const override {
3174     return getKnown() && isAssumedDead(I);
3175   }
3176 
3177   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3178   /// that internal function called from \p BB should now be looked at.
3179   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3180     if (!AssumedLiveBlocks.insert(&BB).second)
3181       return false;
3182 
3183     // We assume that all of BB is (probably) live now and if there are calls to
3184     // internal functions we will assume that those are now live as well. This
3185     // is a performance optimization for blocks with calls to a lot of internal
3186     // functions. It can however cause dead functions to be treated as live.
3187     for (const Instruction &I : BB)
3188       if (const auto *CB = dyn_cast<CallBase>(&I))
3189         if (const Function *F = CB->getCalledFunction())
3190           if (F->hasLocalLinkage())
3191             A.markLiveInternalFunction(*F);
3192     return true;
3193   }
3194 
3195   /// Collection of instructions that need to be explored again, e.g., we
3196   /// did assume they do not transfer control to (one of their) successors.
3197   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3198 
3199   /// Collection of instructions that are known to not transfer control.
3200   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3201 
3202   /// Collection of all assumed live edges
3203   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3204 
3205   /// Collection of all assumed live BasicBlocks.
3206   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3207 };
3208 
3209 static bool
3210 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3211                         AbstractAttribute &AA,
3212                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3213   const IRPosition &IPos = IRPosition::callsite_function(CB);
3214 
3215   const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3216       AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3217   if (NoReturnAA.isAssumedNoReturn())
3218     return !NoReturnAA.isKnownNoReturn();
3219   if (CB.isTerminator())
3220     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3221   else
3222     AliveSuccessors.push_back(CB.getNextNode());
3223   return false;
3224 }
3225 
3226 static bool
3227 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3228                         AbstractAttribute &AA,
3229                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3230   bool UsedAssumedInformation =
3231       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3232 
3233   // First, determine if we can change an invoke to a call assuming the
3234   // callee is nounwind. This is not possible if the personality of the
3235   // function allows to catch asynchronous exceptions.
3236   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3237     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3238   } else {
3239     const IRPosition &IPos = IRPosition::callsite_function(II);
3240     const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>(
3241         AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3242     if (AANoUnw.isAssumedNoUnwind()) {
3243       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3244     } else {
3245       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3246     }
3247   }
3248   return UsedAssumedInformation;
3249 }
3250 
3251 static bool
3252 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3253                         AbstractAttribute &AA,
3254                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3255   bool UsedAssumedInformation = false;
3256   if (BI.getNumSuccessors() == 1) {
3257     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3258   } else {
3259     Optional<ConstantInt *> CI = getAssumedConstantInt(
3260         A, *BI.getCondition(), AA, UsedAssumedInformation);
3261     if (!CI.hasValue()) {
3262       // No value yet, assume both edges are dead.
3263     } else if (CI.getValue()) {
3264       const BasicBlock *SuccBB =
3265           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3266       AliveSuccessors.push_back(&SuccBB->front());
3267     } else {
3268       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3269       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3270       UsedAssumedInformation = false;
3271     }
3272   }
3273   return UsedAssumedInformation;
3274 }
3275 
3276 static bool
3277 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3278                         AbstractAttribute &AA,
3279                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3280   bool UsedAssumedInformation = false;
3281   Optional<ConstantInt *> CI =
3282       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3283   if (!CI.hasValue()) {
3284     // No value yet, assume all edges are dead.
3285   } else if (CI.getValue()) {
3286     for (auto &CaseIt : SI.cases()) {
3287       if (CaseIt.getCaseValue() == CI.getValue()) {
3288         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3289         return UsedAssumedInformation;
3290       }
3291     }
3292     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3293     return UsedAssumedInformation;
3294   } else {
3295     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3296       AliveSuccessors.push_back(&SuccBB->front());
3297   }
3298   return UsedAssumedInformation;
3299 }
3300 
3301 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3302   ChangeStatus Change = ChangeStatus::UNCHANGED;
3303 
3304   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3305                     << getAnchorScope()->size() << "] BBs and "
3306                     << ToBeExploredFrom.size() << " exploration points and "
3307                     << KnownDeadEnds.size() << " known dead ends\n");
3308 
3309   // Copy and clear the list of instructions we need to explore from. It is
3310   // refilled with instructions the next update has to look at.
3311   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3312                                                ToBeExploredFrom.end());
3313   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3314 
3315   SmallVector<const Instruction *, 8> AliveSuccessors;
3316   while (!Worklist.empty()) {
3317     const Instruction *I = Worklist.pop_back_val();
3318     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3319 
3320     // Fast forward for uninteresting instructions. We could look for UB here
3321     // though.
3322     while (!I->isTerminator() && !isa<CallBase>(I)) {
3323       Change = ChangeStatus::CHANGED;
3324       I = I->getNextNode();
3325     }
3326 
3327     AliveSuccessors.clear();
3328 
3329     bool UsedAssumedInformation = false;
3330     switch (I->getOpcode()) {
3331     // TODO: look for (assumed) UB to backwards propagate "deadness".
3332     default:
3333       assert(I->isTerminator() &&
3334              "Expected non-terminators to be handled already!");
3335       for (const BasicBlock *SuccBB : successors(I->getParent()))
3336         AliveSuccessors.push_back(&SuccBB->front());
3337       break;
3338     case Instruction::Call:
3339       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3340                                                        *this, AliveSuccessors);
3341       break;
3342     case Instruction::Invoke:
3343       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3344                                                        *this, AliveSuccessors);
3345       break;
3346     case Instruction::Br:
3347       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3348                                                        *this, AliveSuccessors);
3349       break;
3350     case Instruction::Switch:
3351       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3352                                                        *this, AliveSuccessors);
3353       break;
3354     }
3355 
3356     if (UsedAssumedInformation) {
3357       NewToBeExploredFrom.insert(I);
3358     } else {
3359       Change = ChangeStatus::CHANGED;
3360       if (AliveSuccessors.empty() ||
3361           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3362         KnownDeadEnds.insert(I);
3363     }
3364 
3365     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3366                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3367                       << UsedAssumedInformation << "\n");
3368 
3369     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3370       if (!I->isTerminator()) {
3371         assert(AliveSuccessors.size() == 1 &&
3372                "Non-terminator expected to have a single successor!");
3373         Worklist.push_back(AliveSuccessor);
3374       } else {
3375         // record the assumed live edge
3376         AssumedLiveEdges.insert(
3377             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3378         if (assumeLive(A, *AliveSuccessor->getParent()))
3379           Worklist.push_back(AliveSuccessor);
3380       }
3381     }
3382   }
3383 
3384   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3385 
3386   // If we know everything is live there is no need to query for liveness.
3387   // Instead, indicating a pessimistic fixpoint will cause the state to be
3388   // "invalid" and all queries to be answered conservatively without lookups.
3389   // To be in this state we have to (1) finished the exploration and (3) not
3390   // discovered any non-trivial dead end and (2) not ruled unreachable code
3391   // dead.
3392   if (ToBeExploredFrom.empty() &&
3393       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3394       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3395         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3396       }))
3397     return indicatePessimisticFixpoint();
3398   return Change;
3399 }
3400 
3401 /// Liveness information for a call sites.
3402 struct AAIsDeadCallSite final : AAIsDeadFunction {
3403   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3404       : AAIsDeadFunction(IRP, A) {}
3405 
3406   /// See AbstractAttribute::initialize(...).
3407   void initialize(Attributor &A) override {
3408     // TODO: Once we have call site specific value information we can provide
3409     //       call site specific liveness information and then it makes
3410     //       sense to specialize attributes for call sites instead of
3411     //       redirecting requests to the callee.
3412     llvm_unreachable("Abstract attributes for liveness are not "
3413                      "supported for call sites yet!");
3414   }
3415 
3416   /// See AbstractAttribute::updateImpl(...).
3417   ChangeStatus updateImpl(Attributor &A) override {
3418     return indicatePessimisticFixpoint();
3419   }
3420 
3421   /// See AbstractAttribute::trackStatistics()
3422   void trackStatistics() const override {}
3423 };
3424 
3425 /// -------------------- Dereferenceable Argument Attribute --------------------
3426 
3427 template <>
3428 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3429                                                      const DerefState &R) {
3430   ChangeStatus CS0 =
3431       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3432   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3433   return CS0 | CS1;
3434 }
3435 
3436 struct AADereferenceableImpl : AADereferenceable {
3437   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3438       : AADereferenceable(IRP, A) {}
3439   using StateType = DerefState;
3440 
3441   /// See AbstractAttribute::initialize(...).
3442   void initialize(Attributor &A) override {
3443     SmallVector<Attribute, 4> Attrs;
3444     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3445              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3446     for (const Attribute &Attr : Attrs)
3447       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3448 
3449     const IRPosition &IRP = this->getIRPosition();
3450     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP,
3451                                        /* TrackDependence */ false);
3452 
3453     bool CanBeNull;
3454     takeKnownDerefBytesMaximum(
3455         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3456             A.getDataLayout(), CanBeNull));
3457 
3458     bool IsFnInterface = IRP.isFnInterfaceKind();
3459     Function *FnScope = IRP.getAnchorScope();
3460     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3461       indicatePessimisticFixpoint();
3462       return;
3463     }
3464 
3465     if (Instruction *CtxI = getCtxI())
3466       followUsesInMBEC(*this, A, getState(), *CtxI);
3467   }
3468 
3469   /// See AbstractAttribute::getState()
3470   /// {
3471   StateType &getState() override { return *this; }
3472   const StateType &getState() const override { return *this; }
3473   /// }
3474 
3475   /// Helper function for collecting accessed bytes in must-be-executed-context
3476   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3477                               DerefState &State) {
3478     const Value *UseV = U->get();
3479     if (!UseV->getType()->isPointerTy())
3480       return;
3481 
3482     Type *PtrTy = UseV->getType();
3483     const DataLayout &DL = A.getDataLayout();
3484     int64_t Offset;
3485     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3486             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3487       if (Base == &getAssociatedValue() &&
3488           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3489         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3490         State.addAccessedBytes(Offset, Size);
3491       }
3492     }
3493     return;
3494   }
3495 
3496   /// See followUsesInMBEC
3497   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3498                        AADereferenceable::StateType &State) {
3499     bool IsNonNull = false;
3500     bool TrackUse = false;
3501     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3502         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3503     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3504                       << " for instruction " << *I << "\n");
3505 
3506     addAccessedBytesForUse(A, U, I, State);
3507     State.takeKnownDerefBytesMaximum(DerefBytes);
3508     return TrackUse;
3509   }
3510 
3511   /// See AbstractAttribute::manifest(...).
3512   ChangeStatus manifest(Attributor &A) override {
3513     ChangeStatus Change = AADereferenceable::manifest(A);
3514     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3515       removeAttrs({Attribute::DereferenceableOrNull});
3516       return ChangeStatus::CHANGED;
3517     }
3518     return Change;
3519   }
3520 
3521   void getDeducedAttributes(LLVMContext &Ctx,
3522                             SmallVectorImpl<Attribute> &Attrs) const override {
3523     // TODO: Add *_globally support
3524     if (isAssumedNonNull())
3525       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3526           Ctx, getAssumedDereferenceableBytes()));
3527     else
3528       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3529           Ctx, getAssumedDereferenceableBytes()));
3530   }
3531 
3532   /// See AbstractAttribute::getAsStr().
3533   const std::string getAsStr() const override {
3534     if (!getAssumedDereferenceableBytes())
3535       return "unknown-dereferenceable";
3536     return std::string("dereferenceable") +
3537            (isAssumedNonNull() ? "" : "_or_null") +
3538            (isAssumedGlobal() ? "_globally" : "") + "<" +
3539            std::to_string(getKnownDereferenceableBytes()) + "-" +
3540            std::to_string(getAssumedDereferenceableBytes()) + ">";
3541   }
3542 };
3543 
3544 /// Dereferenceable attribute for a floating value.
3545 struct AADereferenceableFloating : AADereferenceableImpl {
3546   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3547       : AADereferenceableImpl(IRP, A) {}
3548 
3549   /// See AbstractAttribute::updateImpl(...).
3550   ChangeStatus updateImpl(Attributor &A) override {
3551     const DataLayout &DL = A.getDataLayout();
3552 
3553     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3554                             bool Stripped) -> bool {
3555       unsigned IdxWidth =
3556           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3557       APInt Offset(IdxWidth, 0);
3558       const Value *Base =
3559           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3560 
3561       const auto &AA =
3562           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3563       int64_t DerefBytes = 0;
3564       if (!Stripped && this == &AA) {
3565         // Use IR information if we did not strip anything.
3566         // TODO: track globally.
3567         bool CanBeNull;
3568         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3569         T.GlobalState.indicatePessimisticFixpoint();
3570       } else {
3571         const DerefState &DS = AA.getState();
3572         DerefBytes = DS.DerefBytesState.getAssumed();
3573         T.GlobalState &= DS.GlobalState;
3574       }
3575 
3576       // For now we do not try to "increase" dereferenceability due to negative
3577       // indices as we first have to come up with code to deal with loops and
3578       // for overflows of the dereferenceable bytes.
3579       int64_t OffsetSExt = Offset.getSExtValue();
3580       if (OffsetSExt < 0)
3581         OffsetSExt = 0;
3582 
3583       T.takeAssumedDerefBytesMinimum(
3584           std::max(int64_t(0), DerefBytes - OffsetSExt));
3585 
3586       if (this == &AA) {
3587         if (!Stripped) {
3588           // If nothing was stripped IR information is all we got.
3589           T.takeKnownDerefBytesMaximum(
3590               std::max(int64_t(0), DerefBytes - OffsetSExt));
3591           T.indicatePessimisticFixpoint();
3592         } else if (OffsetSExt > 0) {
3593           // If something was stripped but there is circular reasoning we look
3594           // for the offset. If it is positive we basically decrease the
3595           // dereferenceable bytes in a circluar loop now, which will simply
3596           // drive them down to the known value in a very slow way which we
3597           // can accelerate.
3598           T.indicatePessimisticFixpoint();
3599         }
3600       }
3601 
3602       return T.isValidState();
3603     };
3604 
3605     DerefState T;
3606     if (!genericValueTraversal<AADereferenceable, DerefState>(
3607             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3608       return indicatePessimisticFixpoint();
3609 
3610     return clampStateAndIndicateChange(getState(), T);
3611   }
3612 
3613   /// See AbstractAttribute::trackStatistics()
3614   void trackStatistics() const override {
3615     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3616   }
3617 };
3618 
3619 /// Dereferenceable attribute for a return value.
3620 struct AADereferenceableReturned final
3621     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3622   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3623       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3624             IRP, A) {}
3625 
3626   /// See AbstractAttribute::trackStatistics()
3627   void trackStatistics() const override {
3628     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3629   }
3630 };
3631 
3632 /// Dereferenceable attribute for an argument
3633 struct AADereferenceableArgument final
3634     : AAArgumentFromCallSiteArguments<AADereferenceable,
3635                                       AADereferenceableImpl> {
3636   using Base =
3637       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3638   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3639       : Base(IRP, A) {}
3640 
3641   /// See AbstractAttribute::trackStatistics()
3642   void trackStatistics() const override {
3643     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3644   }
3645 };
3646 
3647 /// Dereferenceable attribute for a call site argument.
3648 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3649   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3650       : AADereferenceableFloating(IRP, A) {}
3651 
3652   /// See AbstractAttribute::trackStatistics()
3653   void trackStatistics() const override {
3654     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3655   }
3656 };
3657 
3658 /// Dereferenceable attribute deduction for a call site return value.
3659 struct AADereferenceableCallSiteReturned final
3660     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3661   using Base =
3662       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3663   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3664       : Base(IRP, A) {}
3665 
3666   /// See AbstractAttribute::trackStatistics()
3667   void trackStatistics() const override {
3668     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3669   }
3670 };
3671 
3672 // ------------------------ Align Argument Attribute ------------------------
3673 
3674 static unsigned getKnownAlignForUse(Attributor &A,
3675                                     AbstractAttribute &QueryingAA,
3676                                     Value &AssociatedValue, const Use *U,
3677                                     const Instruction *I, bool &TrackUse) {
3678   // We need to follow common pointer manipulation uses to the accesses they
3679   // feed into.
3680   if (isa<CastInst>(I)) {
3681     // Follow all but ptr2int casts.
3682     TrackUse = !isa<PtrToIntInst>(I);
3683     return 0;
3684   }
3685   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3686     if (GEP->hasAllConstantIndices()) {
3687       TrackUse = true;
3688       return 0;
3689     }
3690   }
3691 
3692   MaybeAlign MA;
3693   if (const auto *CB = dyn_cast<CallBase>(I)) {
3694     if (CB->isBundleOperand(U) || CB->isCallee(U))
3695       return 0;
3696 
3697     unsigned ArgNo = CB->getArgOperandNo(U);
3698     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3699     // As long as we only use known information there is no need to track
3700     // dependences here.
3701     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3702                                         /* TrackDependence */ false);
3703     MA = MaybeAlign(AlignAA.getKnownAlign());
3704   }
3705 
3706   const DataLayout &DL = A.getDataLayout();
3707   const Value *UseV = U->get();
3708   if (auto *SI = dyn_cast<StoreInst>(I)) {
3709     if (SI->getPointerOperand() == UseV)
3710       MA = SI->getAlign();
3711   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3712     if (LI->getPointerOperand() == UseV)
3713       MA = LI->getAlign();
3714   }
3715 
3716   if (!MA || *MA <= 1)
3717     return 0;
3718 
3719   unsigned Alignment = MA->value();
3720   int64_t Offset;
3721 
3722   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3723     if (Base == &AssociatedValue) {
3724       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3725       // So we can say that the maximum power of two which is a divisor of
3726       // gcd(Offset, Alignment) is an alignment.
3727 
3728       uint32_t gcd =
3729           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3730       Alignment = llvm::PowerOf2Floor(gcd);
3731     }
3732   }
3733 
3734   return Alignment;
3735 }
3736 
3737 struct AAAlignImpl : AAAlign {
3738   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3739 
3740   /// See AbstractAttribute::initialize(...).
3741   void initialize(Attributor &A) override {
3742     SmallVector<Attribute, 4> Attrs;
3743     getAttrs({Attribute::Alignment}, Attrs);
3744     for (const Attribute &Attr : Attrs)
3745       takeKnownMaximum(Attr.getValueAsInt());
3746 
3747     Value &V = getAssociatedValue();
3748     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3749     //       use of the function pointer. This was caused by D73131. We want to
3750     //       avoid this for function pointers especially because we iterate
3751     //       their uses and int2ptr is not handled. It is not a correctness
3752     //       problem though!
3753     if (!V.getType()->getPointerElementType()->isFunctionTy())
3754       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3755 
3756     if (getIRPosition().isFnInterfaceKind() &&
3757         (!getAnchorScope() ||
3758          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3759       indicatePessimisticFixpoint();
3760       return;
3761     }
3762 
3763     if (Instruction *CtxI = getCtxI())
3764       followUsesInMBEC(*this, A, getState(), *CtxI);
3765   }
3766 
3767   /// See AbstractAttribute::manifest(...).
3768   ChangeStatus manifest(Attributor &A) override {
3769     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3770 
3771     // Check for users that allow alignment annotations.
3772     Value &AssociatedValue = getAssociatedValue();
3773     for (const Use &U : AssociatedValue.uses()) {
3774       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3775         if (SI->getPointerOperand() == &AssociatedValue)
3776           if (SI->getAlignment() < getAssumedAlign()) {
3777             STATS_DECLTRACK(AAAlign, Store,
3778                             "Number of times alignment added to a store");
3779             SI->setAlignment(Align(getAssumedAlign()));
3780             LoadStoreChanged = ChangeStatus::CHANGED;
3781           }
3782       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3783         if (LI->getPointerOperand() == &AssociatedValue)
3784           if (LI->getAlignment() < getAssumedAlign()) {
3785             LI->setAlignment(Align(getAssumedAlign()));
3786             STATS_DECLTRACK(AAAlign, Load,
3787                             "Number of times alignment added to a load");
3788             LoadStoreChanged = ChangeStatus::CHANGED;
3789           }
3790       }
3791     }
3792 
3793     ChangeStatus Changed = AAAlign::manifest(A);
3794 
3795     Align InheritAlign =
3796         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3797     if (InheritAlign >= getAssumedAlign())
3798       return LoadStoreChanged;
3799     return Changed | LoadStoreChanged;
3800   }
3801 
3802   // TODO: Provide a helper to determine the implied ABI alignment and check in
3803   //       the existing manifest method and a new one for AAAlignImpl that value
3804   //       to avoid making the alignment explicit if it did not improve.
3805 
3806   /// See AbstractAttribute::getDeducedAttributes
3807   virtual void
3808   getDeducedAttributes(LLVMContext &Ctx,
3809                        SmallVectorImpl<Attribute> &Attrs) const override {
3810     if (getAssumedAlign() > 1)
3811       Attrs.emplace_back(
3812           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3813   }
3814 
3815   /// See followUsesInMBEC
3816   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3817                        AAAlign::StateType &State) {
3818     bool TrackUse = false;
3819 
3820     unsigned int KnownAlign =
3821         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3822     State.takeKnownMaximum(KnownAlign);
3823 
3824     return TrackUse;
3825   }
3826 
3827   /// See AbstractAttribute::getAsStr().
3828   const std::string getAsStr() const override {
3829     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3830                                 "-" + std::to_string(getAssumedAlign()) + ">")
3831                              : "unknown-align";
3832   }
3833 };
3834 
3835 /// Align attribute for a floating value.
3836 struct AAAlignFloating : AAAlignImpl {
3837   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3838 
3839   /// See AbstractAttribute::updateImpl(...).
3840   ChangeStatus updateImpl(Attributor &A) override {
3841     const DataLayout &DL = A.getDataLayout();
3842 
3843     auto VisitValueCB = [&](Value &V, const Instruction *,
3844                             AAAlign::StateType &T, bool Stripped) -> bool {
3845       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3846       if (!Stripped && this == &AA) {
3847         // Use only IR information if we did not strip anything.
3848         Align PA = V.getPointerAlignment(DL);
3849         T.takeKnownMaximum(PA.value());
3850         T.indicatePessimisticFixpoint();
3851       } else {
3852         // Use abstract attribute information.
3853         const AAAlign::StateType &DS = AA.getState();
3854         T ^= DS;
3855       }
3856       return T.isValidState();
3857     };
3858 
3859     StateType T;
3860     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3861                                                    VisitValueCB, getCtxI()))
3862       return indicatePessimisticFixpoint();
3863 
3864     // TODO: If we know we visited all incoming values, thus no are assumed
3865     // dead, we can take the known information from the state T.
3866     return clampStateAndIndicateChange(getState(), T);
3867   }
3868 
3869   /// See AbstractAttribute::trackStatistics()
3870   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3871 };
3872 
3873 /// Align attribute for function return value.
3874 struct AAAlignReturned final
3875     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3876   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3877   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3878 
3879   /// See AbstractAttribute::initialize(...).
3880   void initialize(Attributor &A) override {
3881     Base::initialize(A);
3882     Function *F = getAssociatedFunction();
3883     if (!F || F->isDeclaration())
3884       indicatePessimisticFixpoint();
3885   }
3886 
3887   /// See AbstractAttribute::trackStatistics()
3888   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3889 };
3890 
3891 /// Align attribute for function argument.
3892 struct AAAlignArgument final
3893     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3894   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3895   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3896 
3897   /// See AbstractAttribute::manifest(...).
3898   ChangeStatus manifest(Attributor &A) override {
3899     // If the associated argument is involved in a must-tail call we give up
3900     // because we would need to keep the argument alignments of caller and
3901     // callee in-sync. Just does not seem worth the trouble right now.
3902     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3903       return ChangeStatus::UNCHANGED;
3904     return Base::manifest(A);
3905   }
3906 
3907   /// See AbstractAttribute::trackStatistics()
3908   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3909 };
3910 
3911 struct AAAlignCallSiteArgument final : AAAlignFloating {
3912   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3913       : AAAlignFloating(IRP, A) {}
3914 
3915   /// See AbstractAttribute::manifest(...).
3916   ChangeStatus manifest(Attributor &A) override {
3917     // If the associated argument is involved in a must-tail call we give up
3918     // because we would need to keep the argument alignments of caller and
3919     // callee in-sync. Just does not seem worth the trouble right now.
3920     if (Argument *Arg = getAssociatedArgument())
3921       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3922         return ChangeStatus::UNCHANGED;
3923     ChangeStatus Changed = AAAlignImpl::manifest(A);
3924     Align InheritAlign =
3925         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3926     if (InheritAlign >= getAssumedAlign())
3927       Changed = ChangeStatus::UNCHANGED;
3928     return Changed;
3929   }
3930 
3931   /// See AbstractAttribute::updateImpl(Attributor &A).
3932   ChangeStatus updateImpl(Attributor &A) override {
3933     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3934     if (Argument *Arg = getAssociatedArgument()) {
3935       // We only take known information from the argument
3936       // so we do not need to track a dependence.
3937       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3938           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3939       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3940     }
3941     return Changed;
3942   }
3943 
3944   /// See AbstractAttribute::trackStatistics()
3945   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3946 };
3947 
3948 /// Align attribute deduction for a call site return value.
3949 struct AAAlignCallSiteReturned final
3950     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3951   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3952   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3953       : Base(IRP, A) {}
3954 
3955   /// See AbstractAttribute::initialize(...).
3956   void initialize(Attributor &A) override {
3957     Base::initialize(A);
3958     Function *F = getAssociatedFunction();
3959     if (!F || F->isDeclaration())
3960       indicatePessimisticFixpoint();
3961   }
3962 
3963   /// See AbstractAttribute::trackStatistics()
3964   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3965 };
3966 
3967 /// ------------------ Function No-Return Attribute ----------------------------
3968 struct AANoReturnImpl : public AANoReturn {
3969   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3970 
3971   /// See AbstractAttribute::initialize(...).
3972   void initialize(Attributor &A) override {
3973     AANoReturn::initialize(A);
3974     Function *F = getAssociatedFunction();
3975     if (!F || F->isDeclaration())
3976       indicatePessimisticFixpoint();
3977   }
3978 
3979   /// See AbstractAttribute::getAsStr().
3980   const std::string getAsStr() const override {
3981     return getAssumed() ? "noreturn" : "may-return";
3982   }
3983 
3984   /// See AbstractAttribute::updateImpl(Attributor &A).
3985   virtual ChangeStatus updateImpl(Attributor &A) override {
3986     auto CheckForNoReturn = [](Instruction &) { return false; };
3987     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3988                                    {(unsigned)Instruction::Ret}))
3989       return indicatePessimisticFixpoint();
3990     return ChangeStatus::UNCHANGED;
3991   }
3992 };
3993 
3994 struct AANoReturnFunction final : AANoReturnImpl {
3995   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
3996       : AANoReturnImpl(IRP, A) {}
3997 
3998   /// See AbstractAttribute::trackStatistics()
3999   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4000 };
4001 
4002 /// NoReturn attribute deduction for a call sites.
4003 struct AANoReturnCallSite final : AANoReturnImpl {
4004   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4005       : AANoReturnImpl(IRP, A) {}
4006 
4007   /// See AbstractAttribute::updateImpl(...).
4008   ChangeStatus updateImpl(Attributor &A) override {
4009     // TODO: Once we have call site specific value information we can provide
4010     //       call site specific liveness information and then it makes
4011     //       sense to specialize attributes for call sites arguments instead of
4012     //       redirecting requests to the callee argument.
4013     Function *F = getAssociatedFunction();
4014     const IRPosition &FnPos = IRPosition::function(*F);
4015     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
4016     return clampStateAndIndicateChange(getState(), FnAA.getState());
4017   }
4018 
4019   /// See AbstractAttribute::trackStatistics()
4020   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4021 };
4022 
4023 /// ----------------------- Variable Capturing ---------------------------------
4024 
4025 /// A class to hold the state of for no-capture attributes.
4026 struct AANoCaptureImpl : public AANoCapture {
4027   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4028 
4029   /// See AbstractAttribute::initialize(...).
4030   void initialize(Attributor &A) override {
4031     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4032       indicateOptimisticFixpoint();
4033       return;
4034     }
4035     Function *AnchorScope = getAnchorScope();
4036     if (isFnInterfaceKind() &&
4037         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4038       indicatePessimisticFixpoint();
4039       return;
4040     }
4041 
4042     // You cannot "capture" null in the default address space.
4043     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4044         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4045       indicateOptimisticFixpoint();
4046       return;
4047     }
4048 
4049     const Function *F = isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4050 
4051     // Check what state the associated function can actually capture.
4052     if (F)
4053       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4054     else
4055       indicatePessimisticFixpoint();
4056   }
4057 
4058   /// See AbstractAttribute::updateImpl(...).
4059   ChangeStatus updateImpl(Attributor &A) override;
4060 
4061   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4062   virtual void
4063   getDeducedAttributes(LLVMContext &Ctx,
4064                        SmallVectorImpl<Attribute> &Attrs) const override {
4065     if (!isAssumedNoCaptureMaybeReturned())
4066       return;
4067 
4068     if (isArgumentPosition()) {
4069       if (isAssumedNoCapture())
4070         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4071       else if (ManifestInternal)
4072         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4073     }
4074   }
4075 
4076   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4077   /// depending on the ability of the function associated with \p IRP to capture
4078   /// state in memory and through "returning/throwing", respectively.
4079   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4080                                                    const Function &F,
4081                                                    BitIntegerState &State) {
4082     // TODO: Once we have memory behavior attributes we should use them here.
4083 
4084     // If we know we cannot communicate or write to memory, we do not care about
4085     // ptr2int anymore.
4086     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4087         F.getReturnType()->isVoidTy()) {
4088       State.addKnownBits(NO_CAPTURE);
4089       return;
4090     }
4091 
4092     // A function cannot capture state in memory if it only reads memory, it can
4093     // however return/throw state and the state might be influenced by the
4094     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4095     if (F.onlyReadsMemory())
4096       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4097 
4098     // A function cannot communicate state back if it does not through
4099     // exceptions and doesn not return values.
4100     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4101       State.addKnownBits(NOT_CAPTURED_IN_RET);
4102 
4103     // Check existing "returned" attributes.
4104     int ArgNo = IRP.getCalleeArgNo();
4105     if (F.doesNotThrow() && ArgNo >= 0) {
4106       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4107         if (F.hasParamAttribute(u, Attribute::Returned)) {
4108           if (u == unsigned(ArgNo))
4109             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4110           else if (F.onlyReadsMemory())
4111             State.addKnownBits(NO_CAPTURE);
4112           else
4113             State.addKnownBits(NOT_CAPTURED_IN_RET);
4114           break;
4115         }
4116     }
4117   }
4118 
4119   /// See AbstractState::getAsStr().
4120   const std::string getAsStr() const override {
4121     if (isKnownNoCapture())
4122       return "known not-captured";
4123     if (isAssumedNoCapture())
4124       return "assumed not-captured";
4125     if (isKnownNoCaptureMaybeReturned())
4126       return "known not-captured-maybe-returned";
4127     if (isAssumedNoCaptureMaybeReturned())
4128       return "assumed not-captured-maybe-returned";
4129     return "assumed-captured";
4130   }
4131 };
4132 
4133 /// Attributor-aware capture tracker.
4134 struct AACaptureUseTracker final : public CaptureTracker {
4135 
4136   /// Create a capture tracker that can lookup in-flight abstract attributes
4137   /// through the Attributor \p A.
4138   ///
4139   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4140   /// search is stopped. If a use leads to a return instruction,
4141   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4142   /// If a use leads to a ptr2int which may capture the value,
4143   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4144   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4145   /// set. All values in \p PotentialCopies are later tracked as well. For every
4146   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4147   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4148   /// conservatively set to true.
4149   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4150                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4151                       SmallVectorImpl<const Value *> &PotentialCopies,
4152                       unsigned &RemainingUsesToExplore)
4153       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4154         PotentialCopies(PotentialCopies),
4155         RemainingUsesToExplore(RemainingUsesToExplore) {}
4156 
4157   /// Determine if \p V maybe captured. *Also updates the state!*
4158   bool valueMayBeCaptured(const Value *V) {
4159     if (V->getType()->isPointerTy()) {
4160       PointerMayBeCaptured(V, this);
4161     } else {
4162       State.indicatePessimisticFixpoint();
4163     }
4164     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4165   }
4166 
4167   /// See CaptureTracker::tooManyUses().
4168   void tooManyUses() override {
4169     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4170   }
4171 
4172   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4173     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4174       return true;
4175     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4176         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
4177         DepClassTy::OPTIONAL);
4178     return DerefAA.getAssumedDereferenceableBytes();
4179   }
4180 
4181   /// See CaptureTracker::captured(...).
4182   bool captured(const Use *U) override {
4183     Instruction *UInst = cast<Instruction>(U->getUser());
4184     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4185                       << "\n");
4186 
4187     // Because we may reuse the tracker multiple times we keep track of the
4188     // number of explored uses ourselves as well.
4189     if (RemainingUsesToExplore-- == 0) {
4190       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4191       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4192                           /* Return */ true);
4193     }
4194 
4195     // Deal with ptr2int by following uses.
4196     if (isa<PtrToIntInst>(UInst)) {
4197       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4198       return valueMayBeCaptured(UInst);
4199     }
4200 
4201     // Explicitly catch return instructions.
4202     if (isa<ReturnInst>(UInst))
4203       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4204                           /* Return */ true);
4205 
4206     // For now we only use special logic for call sites. However, the tracker
4207     // itself knows about a lot of other non-capturing cases already.
4208     auto *CB = dyn_cast<CallBase>(UInst);
4209     if (!CB || !CB->isArgOperand(U))
4210       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4211                           /* Return */ true);
4212 
4213     unsigned ArgNo = CB->getArgOperandNo(U);
4214     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4215     // If we have a abstract no-capture attribute for the argument we can use
4216     // it to justify a non-capture attribute here. This allows recursion!
4217     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4218     if (ArgNoCaptureAA.isAssumedNoCapture())
4219       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4220                           /* Return */ false);
4221     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4222       addPotentialCopy(*CB);
4223       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4224                           /* Return */ false);
4225     }
4226 
4227     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4228     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4229                         /* Return */ true);
4230   }
4231 
4232   /// Register \p CS as potential copy of the value we are checking.
4233   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4234 
4235   /// See CaptureTracker::shouldExplore(...).
4236   bool shouldExplore(const Use *U) override {
4237     // Check liveness and ignore droppable users.
4238     return !U->getUser()->isDroppable() &&
4239            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4240   }
4241 
4242   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4243   /// \p CapturedInRet, then return the appropriate value for use in the
4244   /// CaptureTracker::captured() interface.
4245   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4246                     bool CapturedInRet) {
4247     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4248                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4249     if (CapturedInMem)
4250       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4251     if (CapturedInInt)
4252       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4253     if (CapturedInRet)
4254       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4255     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4256   }
4257 
4258 private:
4259   /// The attributor providing in-flight abstract attributes.
4260   Attributor &A;
4261 
4262   /// The abstract attribute currently updated.
4263   AANoCapture &NoCaptureAA;
4264 
4265   /// The abstract liveness state.
4266   const AAIsDead &IsDeadAA;
4267 
4268   /// The state currently updated.
4269   AANoCapture::StateType &State;
4270 
4271   /// Set of potential copies of the tracked value.
4272   SmallVectorImpl<const Value *> &PotentialCopies;
4273 
4274   /// Global counter to limit the number of explored uses.
4275   unsigned &RemainingUsesToExplore;
4276 };
4277 
4278 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4279   const IRPosition &IRP = getIRPosition();
4280   const Value *V =
4281       isArgumentPosition() ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4282   if (!V)
4283     return indicatePessimisticFixpoint();
4284 
4285   const Function *F =
4286       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4287   assert(F && "Expected a function!");
4288   const IRPosition &FnPos = IRPosition::function(*F);
4289   const auto &IsDeadAA =
4290       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4291 
4292   AANoCapture::StateType T;
4293 
4294   // Readonly means we cannot capture through memory.
4295   const auto &FnMemAA =
4296       A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false);
4297   if (FnMemAA.isAssumedReadOnly()) {
4298     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4299     if (FnMemAA.isKnownReadOnly())
4300       addKnownBits(NOT_CAPTURED_IN_MEM);
4301     else
4302       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4303   }
4304 
4305   // Make sure all returned values are different than the underlying value.
4306   // TODO: we could do this in a more sophisticated way inside
4307   //       AAReturnedValues, e.g., track all values that escape through returns
4308   //       directly somehow.
4309   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4310     bool SeenConstant = false;
4311     for (auto &It : RVAA.returned_values()) {
4312       if (isa<Constant>(It.first)) {
4313         if (SeenConstant)
4314           return false;
4315         SeenConstant = true;
4316       } else if (!isa<Argument>(It.first) ||
4317                  It.first == getAssociatedArgument())
4318         return false;
4319     }
4320     return true;
4321   };
4322 
4323   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4324       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4325   if (NoUnwindAA.isAssumedNoUnwind()) {
4326     bool IsVoidTy = F->getReturnType()->isVoidTy();
4327     const AAReturnedValues *RVAA =
4328         IsVoidTy ? nullptr
4329                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4330                                                  /* TrackDependence */ true,
4331                                                  DepClassTy::OPTIONAL);
4332     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4333       T.addKnownBits(NOT_CAPTURED_IN_RET);
4334       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4335         return ChangeStatus::UNCHANGED;
4336       if (NoUnwindAA.isKnownNoUnwind() &&
4337           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4338         addKnownBits(NOT_CAPTURED_IN_RET);
4339         if (isKnown(NOT_CAPTURED_IN_MEM))
4340           return indicateOptimisticFixpoint();
4341       }
4342     }
4343   }
4344 
4345   // Use the CaptureTracker interface and logic with the specialized tracker,
4346   // defined in AACaptureUseTracker, that can look at in-flight abstract
4347   // attributes and directly updates the assumed state.
4348   SmallVector<const Value *, 4> PotentialCopies;
4349   unsigned RemainingUsesToExplore =
4350       getDefaultMaxUsesToExploreForCaptureTracking();
4351   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4352                               RemainingUsesToExplore);
4353 
4354   // Check all potential copies of the associated value until we can assume
4355   // none will be captured or we have to assume at least one might be.
4356   unsigned Idx = 0;
4357   PotentialCopies.push_back(V);
4358   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4359     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4360 
4361   AANoCapture::StateType &S = getState();
4362   auto Assumed = S.getAssumed();
4363   S.intersectAssumedBits(T.getAssumed());
4364   if (!isAssumedNoCaptureMaybeReturned())
4365     return indicatePessimisticFixpoint();
4366   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4367                                    : ChangeStatus::CHANGED;
4368 }
4369 
4370 /// NoCapture attribute for function arguments.
4371 struct AANoCaptureArgument final : AANoCaptureImpl {
4372   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4373       : AANoCaptureImpl(IRP, A) {}
4374 
4375   /// See AbstractAttribute::trackStatistics()
4376   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4377 };
4378 
4379 /// NoCapture attribute for call site arguments.
4380 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4381   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4382       : AANoCaptureImpl(IRP, A) {}
4383 
4384   /// See AbstractAttribute::initialize(...).
4385   void initialize(Attributor &A) override {
4386     if (Argument *Arg = getAssociatedArgument())
4387       if (Arg->hasByValAttr())
4388         indicateOptimisticFixpoint();
4389     AANoCaptureImpl::initialize(A);
4390   }
4391 
4392   /// See AbstractAttribute::updateImpl(...).
4393   ChangeStatus updateImpl(Attributor &A) override {
4394     // TODO: Once we have call site specific value information we can provide
4395     //       call site specific liveness information and then it makes
4396     //       sense to specialize attributes for call sites arguments instead of
4397     //       redirecting requests to the callee argument.
4398     Argument *Arg = getAssociatedArgument();
4399     if (!Arg)
4400       return indicatePessimisticFixpoint();
4401     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4402     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4403     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4404   }
4405 
4406   /// See AbstractAttribute::trackStatistics()
4407   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4408 };
4409 
4410 /// NoCapture attribute for floating values.
4411 struct AANoCaptureFloating final : AANoCaptureImpl {
4412   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4413       : AANoCaptureImpl(IRP, A) {}
4414 
4415   /// See AbstractAttribute::trackStatistics()
4416   void trackStatistics() const override {
4417     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4418   }
4419 };
4420 
4421 /// NoCapture attribute for function return value.
4422 struct AANoCaptureReturned final : AANoCaptureImpl {
4423   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4424       : AANoCaptureImpl(IRP, A) {
4425     llvm_unreachable("NoCapture is not applicable to function returns!");
4426   }
4427 
4428   /// See AbstractAttribute::initialize(...).
4429   void initialize(Attributor &A) override {
4430     llvm_unreachable("NoCapture is not applicable to function returns!");
4431   }
4432 
4433   /// See AbstractAttribute::updateImpl(...).
4434   ChangeStatus updateImpl(Attributor &A) override {
4435     llvm_unreachable("NoCapture is not applicable to function returns!");
4436   }
4437 
4438   /// See AbstractAttribute::trackStatistics()
4439   void trackStatistics() const override {}
4440 };
4441 
4442 /// NoCapture attribute deduction for a call site return value.
4443 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4444   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4445       : AANoCaptureImpl(IRP, A) {}
4446 
4447   /// See AbstractAttribute::trackStatistics()
4448   void trackStatistics() const override {
4449     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4450   }
4451 };
4452 
4453 /// ------------------ Value Simplify Attribute ----------------------------
4454 struct AAValueSimplifyImpl : AAValueSimplify {
4455   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4456       : AAValueSimplify(IRP, A) {}
4457 
4458   /// See AbstractAttribute::initialize(...).
4459   void initialize(Attributor &A) override {
4460     if (getAssociatedValue().getType()->isVoidTy())
4461       indicatePessimisticFixpoint();
4462   }
4463 
4464   /// See AbstractAttribute::getAsStr().
4465   const std::string getAsStr() const override {
4466     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4467                         : "not-simple";
4468   }
4469 
4470   /// See AbstractAttribute::trackStatistics()
4471   void trackStatistics() const override {}
4472 
4473   /// See AAValueSimplify::getAssumedSimplifiedValue()
4474   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4475     if (!getAssumed())
4476       return const_cast<Value *>(&getAssociatedValue());
4477     return SimplifiedAssociatedValue;
4478   }
4479 
4480   /// Helper function for querying AAValueSimplify and updating candicate.
4481   /// \param QueryingValue Value trying to unify with SimplifiedValue
4482   /// \param AccumulatedSimplifiedValue Current simplification result.
4483   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4484                              Value &QueryingValue,
4485                              Optional<Value *> &AccumulatedSimplifiedValue) {
4486     // FIXME: Add a typecast support.
4487 
4488     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4489         QueryingAA, IRPosition::value(QueryingValue));
4490 
4491     Optional<Value *> QueryingValueSimplified =
4492         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4493 
4494     if (!QueryingValueSimplified.hasValue())
4495       return true;
4496 
4497     if (!QueryingValueSimplified.getValue())
4498       return false;
4499 
4500     Value &QueryingValueSimplifiedUnwrapped =
4501         *QueryingValueSimplified.getValue();
4502 
4503     if (AccumulatedSimplifiedValue.hasValue() &&
4504         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4505         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4506       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4507     if (AccumulatedSimplifiedValue.hasValue() &&
4508         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4509       return true;
4510 
4511     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4512                       << " is assumed to be "
4513                       << QueryingValueSimplifiedUnwrapped << "\n");
4514 
4515     AccumulatedSimplifiedValue = QueryingValueSimplified;
4516     return true;
4517   }
4518 
4519   /// Returns a candidate is found or not
4520   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4521     if (!getAssociatedValue().getType()->isIntegerTy())
4522       return false;
4523 
4524     const auto &AA =
4525         A.getAAFor<AAType>(*this, getIRPosition(), /* TrackDependence */ false);
4526 
4527     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4528 
4529     if (!COpt.hasValue()) {
4530       SimplifiedAssociatedValue = llvm::None;
4531       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4532       return true;
4533     }
4534     if (auto *C = COpt.getValue()) {
4535       SimplifiedAssociatedValue = C;
4536       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4537       return true;
4538     }
4539     return false;
4540   }
4541 
4542   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4543     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4544       return true;
4545     if (askSimplifiedValueFor<AAPotentialValues>(A))
4546       return true;
4547     return false;
4548   }
4549 
4550   /// See AbstractAttribute::manifest(...).
4551   ChangeStatus manifest(Attributor &A) override {
4552     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4553 
4554     if (SimplifiedAssociatedValue.hasValue() &&
4555         !SimplifiedAssociatedValue.getValue())
4556       return Changed;
4557 
4558     Value &V = getAssociatedValue();
4559     auto *C = SimplifiedAssociatedValue.hasValue()
4560                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4561                   : UndefValue::get(V.getType());
4562     if (C) {
4563       // We can replace the AssociatedValue with the constant.
4564       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4565         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4566                           << " :: " << *this << "\n");
4567         if (A.changeValueAfterManifest(V, *C))
4568           Changed = ChangeStatus::CHANGED;
4569       }
4570     }
4571 
4572     return Changed | AAValueSimplify::manifest(A);
4573   }
4574 
4575   /// See AbstractState::indicatePessimisticFixpoint(...).
4576   ChangeStatus indicatePessimisticFixpoint() override {
4577     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4578     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4579     SimplifiedAssociatedValue = &getAssociatedValue();
4580     indicateOptimisticFixpoint();
4581     return ChangeStatus::CHANGED;
4582   }
4583 
4584 protected:
4585   // An assumed simplified value. Initially, it is set to Optional::None, which
4586   // means that the value is not clear under current assumption. If in the
4587   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4588   // returns orignal associated value.
4589   Optional<Value *> SimplifiedAssociatedValue;
4590 };
4591 
4592 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4593   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4594       : AAValueSimplifyImpl(IRP, A) {}
4595 
4596   void initialize(Attributor &A) override {
4597     AAValueSimplifyImpl::initialize(A);
4598     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4599       indicatePessimisticFixpoint();
4600     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4601                  Attribute::StructRet, Attribute::Nest},
4602                 /* IgnoreSubsumingPositions */ true))
4603       indicatePessimisticFixpoint();
4604 
4605     // FIXME: This is a hack to prevent us from propagating function poiner in
4606     // the new pass manager CGSCC pass as it creates call edges the
4607     // CallGraphUpdater cannot handle yet.
4608     Value &V = getAssociatedValue();
4609     if (V.getType()->isPointerTy() &&
4610         V.getType()->getPointerElementType()->isFunctionTy() &&
4611         !A.isModulePass())
4612       indicatePessimisticFixpoint();
4613   }
4614 
4615   /// See AbstractAttribute::updateImpl(...).
4616   ChangeStatus updateImpl(Attributor &A) override {
4617     // Byval is only replacable if it is readonly otherwise we would write into
4618     // the replaced value and not the copy that byval creates implicitly.
4619     Argument *Arg = getAssociatedArgument();
4620     if (Arg->hasByValAttr()) {
4621       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4622       //       there is no race by not copying a constant byval.
4623       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4624       if (!MemAA.isAssumedReadOnly())
4625         return indicatePessimisticFixpoint();
4626     }
4627 
4628     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4629 
4630     auto PredForCallSite = [&](AbstractCallSite ACS) {
4631       const IRPosition &ACSArgPos =
4632           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4633       // Check if a coresponding argument was found or if it is on not
4634       // associated (which can happen for callback calls).
4635       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4636         return false;
4637 
4638       // We can only propagate thread independent values through callbacks.
4639       // This is different to direct/indirect call sites because for them we
4640       // know the thread executing the caller and callee is the same. For
4641       // callbacks this is not guaranteed, thus a thread dependent value could
4642       // be different for the caller and callee, making it invalid to propagate.
4643       Value &ArgOp = ACSArgPos.getAssociatedValue();
4644       if (ACS.isCallbackCall())
4645         if (auto *C = dyn_cast<Constant>(&ArgOp))
4646           if (C->isThreadDependent())
4647             return false;
4648       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4649     };
4650 
4651     bool AllCallSitesKnown;
4652     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4653                                 AllCallSitesKnown))
4654       if (!askSimplifiedValueForOtherAAs(A))
4655         return indicatePessimisticFixpoint();
4656 
4657     // If a candicate was found in this update, return CHANGED.
4658     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4659                ? ChangeStatus::UNCHANGED
4660                : ChangeStatus ::CHANGED;
4661   }
4662 
4663   /// See AbstractAttribute::trackStatistics()
4664   void trackStatistics() const override {
4665     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4666   }
4667 };
4668 
4669 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4670   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4671       : AAValueSimplifyImpl(IRP, A) {}
4672 
4673   /// See AbstractAttribute::updateImpl(...).
4674   ChangeStatus updateImpl(Attributor &A) override {
4675     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4676 
4677     auto PredForReturned = [&](Value &V) {
4678       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4679     };
4680 
4681     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4682       if (!askSimplifiedValueForOtherAAs(A))
4683         return indicatePessimisticFixpoint();
4684 
4685     // If a candicate was found in this update, return CHANGED.
4686     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4687                ? ChangeStatus::UNCHANGED
4688                : ChangeStatus ::CHANGED;
4689   }
4690 
4691   ChangeStatus manifest(Attributor &A) override {
4692     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4693 
4694     if (SimplifiedAssociatedValue.hasValue() &&
4695         !SimplifiedAssociatedValue.getValue())
4696       return Changed;
4697 
4698     Value &V = getAssociatedValue();
4699     auto *C = SimplifiedAssociatedValue.hasValue()
4700                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4701                   : UndefValue::get(V.getType());
4702     if (C) {
4703       auto PredForReturned =
4704           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4705             // We can replace the AssociatedValue with the constant.
4706             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4707               return true;
4708 
4709             for (ReturnInst *RI : RetInsts) {
4710               if (RI->getFunction() != getAnchorScope())
4711                 continue;
4712               auto *RC = C;
4713               if (RC->getType() != RI->getReturnValue()->getType())
4714                 RC = ConstantExpr::getBitCast(RC,
4715                                               RI->getReturnValue()->getType());
4716               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4717                                 << " in " << *RI << " :: " << *this << "\n");
4718               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4719                 Changed = ChangeStatus::CHANGED;
4720             }
4721             return true;
4722           };
4723       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4724     }
4725 
4726     return Changed | AAValueSimplify::manifest(A);
4727   }
4728 
4729   /// See AbstractAttribute::trackStatistics()
4730   void trackStatistics() const override {
4731     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4732   }
4733 };
4734 
4735 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4736   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4737       : AAValueSimplifyImpl(IRP, A) {}
4738 
4739   /// See AbstractAttribute::initialize(...).
4740   void initialize(Attributor &A) override {
4741     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4742     //        Needs investigation.
4743     // AAValueSimplifyImpl::initialize(A);
4744     Value &V = getAnchorValue();
4745 
4746     // TODO: add other stuffs
4747     if (isa<Constant>(V))
4748       indicatePessimisticFixpoint();
4749   }
4750 
4751   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4752   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4753   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4754   /// updated and \p Changed is set appropriately.
4755   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4756                               ChangeStatus &Changed) {
4757     if (!ICmp)
4758       return false;
4759     if (!ICmp->isEquality())
4760       return false;
4761 
4762     // This is a comparison with == or !-. We check for nullptr now.
4763     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4764     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4765     if (!Op0IsNull && !Op1IsNull)
4766       return false;
4767 
4768     LLVMContext &Ctx = ICmp->getContext();
4769     // Check for `nullptr ==/!= nullptr` first:
4770     if (Op0IsNull && Op1IsNull) {
4771       Value *NewVal = ConstantInt::get(
4772           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4773       SimplifiedAssociatedValue = NewVal;
4774       indicateOptimisticFixpoint();
4775       assert(!SimplifiedAssociatedValue.hasValue() &&
4776              "Did not expect non-fixed value for constant comparison");
4777       Changed = ChangeStatus::CHANGED;
4778       return true;
4779     }
4780 
4781     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
4782     // non-nullptr operand and if we assume it's non-null we can conclude the
4783     // result of the comparison.
4784     assert((Op0IsNull || Op1IsNull) &&
4785            "Expected nullptr versus non-nullptr comparison at this point");
4786 
4787     // The index is the operand that we assume is not null.
4788     unsigned PtrIdx = Op0IsNull;
4789     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4790         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)));
4791     if (!PtrNonNullAA.isAssumedNonNull())
4792       return false;
4793 
4794     // The new value depends on the predicate, true for != and false for ==.
4795     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4796                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4797 
4798     assert((!SimplifiedAssociatedValue.hasValue() ||
4799             SimplifiedAssociatedValue == NewVal) &&
4800            "Did not expect to change value for zero-comparison");
4801 
4802     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4803     SimplifiedAssociatedValue = NewVal;
4804 
4805     if (PtrNonNullAA.isKnownNonNull())
4806       indicateOptimisticFixpoint();
4807 
4808     Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus ::CHANGED;
4809     return true;
4810   }
4811 
4812   /// See AbstractAttribute::updateImpl(...).
4813   ChangeStatus updateImpl(Attributor &A) override {
4814     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4815 
4816     ChangeStatus Changed;
4817     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4818                                Changed))
4819       return Changed;
4820 
4821     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4822                             bool Stripped) -> bool {
4823       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4824       if (!Stripped && this == &AA) {
4825         // TODO: Look the instruction and check recursively.
4826 
4827         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4828                           << "\n");
4829         return false;
4830       }
4831       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4832     };
4833 
4834     bool Dummy = false;
4835     if (!genericValueTraversal<AAValueSimplify, bool>(
4836             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4837             /* UseValueSimplify */ false))
4838       if (!askSimplifiedValueForOtherAAs(A))
4839         return indicatePessimisticFixpoint();
4840 
4841     // If a candicate was found in this update, return CHANGED.
4842 
4843     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4844                ? ChangeStatus::UNCHANGED
4845                : ChangeStatus ::CHANGED;
4846   }
4847 
4848   /// See AbstractAttribute::trackStatistics()
4849   void trackStatistics() const override {
4850     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4851   }
4852 };
4853 
4854 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4855   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4856       : AAValueSimplifyImpl(IRP, A) {}
4857 
4858   /// See AbstractAttribute::initialize(...).
4859   void initialize(Attributor &A) override {
4860     SimplifiedAssociatedValue = &getAnchorValue();
4861     indicateOptimisticFixpoint();
4862   }
4863   /// See AbstractAttribute::initialize(...).
4864   ChangeStatus updateImpl(Attributor &A) override {
4865     llvm_unreachable(
4866         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4867   }
4868   /// See AbstractAttribute::trackStatistics()
4869   void trackStatistics() const override {
4870     STATS_DECLTRACK_FN_ATTR(value_simplify)
4871   }
4872 };
4873 
4874 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4875   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4876       : AAValueSimplifyFunction(IRP, A) {}
4877   /// See AbstractAttribute::trackStatistics()
4878   void trackStatistics() const override {
4879     STATS_DECLTRACK_CS_ATTR(value_simplify)
4880   }
4881 };
4882 
4883 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4884   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4885       : AAValueSimplifyReturned(IRP, A) {}
4886 
4887   /// See AbstractAttribute::manifest(...).
4888   ChangeStatus manifest(Attributor &A) override {
4889     return AAValueSimplifyImpl::manifest(A);
4890   }
4891 
4892   void trackStatistics() const override {
4893     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4894   }
4895 };
4896 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4897   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4898       : AAValueSimplifyFloating(IRP, A) {}
4899 
4900   /// See AbstractAttribute::manifest(...).
4901   ChangeStatus manifest(Attributor &A) override {
4902     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4903 
4904     if (SimplifiedAssociatedValue.hasValue() &&
4905         !SimplifiedAssociatedValue.getValue())
4906       return Changed;
4907 
4908     Value &V = getAssociatedValue();
4909     auto *C = SimplifiedAssociatedValue.hasValue()
4910                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4911                   : UndefValue::get(V.getType());
4912     if (C) {
4913       Use &U = cast<CallBase>(&getAnchorValue())
4914                    ->getArgOperandUse(getCallSiteArgNo());
4915       // We can replace the AssociatedValue with the constant.
4916       if (&V != C && V.getType() == C->getType()) {
4917         if (A.changeUseAfterManifest(U, *C))
4918           Changed = ChangeStatus::CHANGED;
4919       }
4920     }
4921 
4922     return Changed | AAValueSimplify::manifest(A);
4923   }
4924 
4925   void trackStatistics() const override {
4926     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4927   }
4928 };
4929 
4930 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4931 struct AAHeapToStackImpl : public AAHeapToStack {
4932   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4933       : AAHeapToStack(IRP, A) {}
4934 
4935   const std::string getAsStr() const override {
4936     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4937   }
4938 
4939   ChangeStatus manifest(Attributor &A) override {
4940     assert(getState().isValidState() &&
4941            "Attempted to manifest an invalid state!");
4942 
4943     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4944     Function *F = getAnchorScope();
4945     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4946 
4947     for (Instruction *MallocCall : MallocCalls) {
4948       // This malloc cannot be replaced.
4949       if (BadMallocCalls.count(MallocCall))
4950         continue;
4951 
4952       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4953         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4954         A.deleteAfterManifest(*FreeCall);
4955         HasChanged = ChangeStatus::CHANGED;
4956       }
4957 
4958       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4959                         << "\n");
4960 
4961       Align Alignment;
4962       Constant *Size;
4963       if (isCallocLikeFn(MallocCall, TLI)) {
4964         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4965         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4966         APInt TotalSize = SizeT->getValue() * Num->getValue();
4967         Size =
4968             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4969       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4970         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4971         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4972                                    ->getValue()
4973                                    .getZExtValue())
4974                         .valueOrOne();
4975       } else {
4976         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4977       }
4978 
4979       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4980       Instruction *AI =
4981           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4982                          "", MallocCall->getNextNode());
4983 
4984       if (AI->getType() != MallocCall->getType())
4985         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4986                              AI->getNextNode());
4987 
4988       A.changeValueAfterManifest(*MallocCall, *AI);
4989 
4990       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4991         auto *NBB = II->getNormalDest();
4992         BranchInst::Create(NBB, MallocCall->getParent());
4993         A.deleteAfterManifest(*MallocCall);
4994       } else {
4995         A.deleteAfterManifest(*MallocCall);
4996       }
4997 
4998       // Zero out the allocated memory if it was a calloc.
4999       if (isCallocLikeFn(MallocCall, TLI)) {
5000         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5001                                    AI->getNextNode());
5002         Value *Ops[] = {
5003             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5004             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5005 
5006         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5007         Module *M = F->getParent();
5008         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5009         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5010       }
5011       HasChanged = ChangeStatus::CHANGED;
5012     }
5013 
5014     return HasChanged;
5015   }
5016 
5017   /// Collection of all malloc calls in a function.
5018   SmallSetVector<Instruction *, 4> MallocCalls;
5019 
5020   /// Collection of malloc calls that cannot be converted.
5021   DenseSet<const Instruction *> BadMallocCalls;
5022 
5023   /// A map for each malloc call to the set of associated free calls.
5024   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5025 
5026   ChangeStatus updateImpl(Attributor &A) override;
5027 };
5028 
5029 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5030   const Function *F = getAnchorScope();
5031   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5032 
5033   MustBeExecutedContextExplorer &Explorer =
5034       A.getInfoCache().getMustBeExecutedContextExplorer();
5035 
5036   auto FreeCheck = [&](Instruction &I) {
5037     const auto &Frees = FreesForMalloc.lookup(&I);
5038     if (Frees.size() != 1)
5039       return false;
5040     Instruction *UniqueFree = *Frees.begin();
5041     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5042   };
5043 
5044   auto UsesCheck = [&](Instruction &I) {
5045     bool ValidUsesOnly = true;
5046     bool MustUse = true;
5047     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5048       Instruction *UserI = cast<Instruction>(U.getUser());
5049       if (isa<LoadInst>(UserI))
5050         return true;
5051       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5052         if (SI->getValueOperand() == U.get()) {
5053           LLVM_DEBUG(dbgs()
5054                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5055           ValidUsesOnly = false;
5056         } else {
5057           // A store into the malloc'ed memory is fine.
5058         }
5059         return true;
5060       }
5061       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5062         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5063           return true;
5064         // Record malloc.
5065         if (isFreeCall(UserI, TLI)) {
5066           if (MustUse) {
5067             FreesForMalloc[&I].insert(UserI);
5068           } else {
5069             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5070                               << *UserI << "\n");
5071             ValidUsesOnly = false;
5072           }
5073           return true;
5074         }
5075 
5076         unsigned ArgNo = CB->getArgOperandNo(&U);
5077 
5078         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5079             *this, IRPosition::callsite_argument(*CB, ArgNo));
5080 
5081         // If a callsite argument use is nofree, we are fine.
5082         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5083             *this, IRPosition::callsite_argument(*CB, ArgNo));
5084 
5085         if (!NoCaptureAA.isAssumedNoCapture() ||
5086             !ArgNoFreeAA.isAssumedNoFree()) {
5087           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5088           ValidUsesOnly = false;
5089         }
5090         return true;
5091       }
5092 
5093       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5094           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5095         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5096         Follow = true;
5097         return true;
5098       }
5099       // Unknown user for which we can not track uses further (in a way that
5100       // makes sense).
5101       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5102       ValidUsesOnly = false;
5103       return true;
5104     };
5105     A.checkForAllUses(Pred, *this, I);
5106     return ValidUsesOnly;
5107   };
5108 
5109   auto MallocCallocCheck = [&](Instruction &I) {
5110     if (BadMallocCalls.count(&I))
5111       return true;
5112 
5113     bool IsMalloc = isMallocLikeFn(&I, TLI);
5114     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5115     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5116     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5117       BadMallocCalls.insert(&I);
5118       return true;
5119     }
5120 
5121     if (IsMalloc) {
5122       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5123         if (Size->getValue().ule(MaxHeapToStackSize))
5124           if (UsesCheck(I) || FreeCheck(I)) {
5125             MallocCalls.insert(&I);
5126             return true;
5127           }
5128     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5129       // Only if the alignment and sizes are constant.
5130       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5131         if (Size->getValue().ule(MaxHeapToStackSize))
5132           if (UsesCheck(I) || FreeCheck(I)) {
5133             MallocCalls.insert(&I);
5134             return true;
5135           }
5136     } else if (IsCalloc) {
5137       bool Overflow = false;
5138       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5139         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5140           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5141                   .ule(MaxHeapToStackSize))
5142             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5143               MallocCalls.insert(&I);
5144               return true;
5145             }
5146     }
5147 
5148     BadMallocCalls.insert(&I);
5149     return true;
5150   };
5151 
5152   size_t NumBadMallocs = BadMallocCalls.size();
5153 
5154   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5155 
5156   if (NumBadMallocs != BadMallocCalls.size())
5157     return ChangeStatus::CHANGED;
5158 
5159   return ChangeStatus::UNCHANGED;
5160 }
5161 
5162 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5163   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5164       : AAHeapToStackImpl(IRP, A) {}
5165 
5166   /// See AbstractAttribute::trackStatistics().
5167   void trackStatistics() const override {
5168     STATS_DECL(
5169         MallocCalls, Function,
5170         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5171     for (auto *C : MallocCalls)
5172       if (!BadMallocCalls.count(C))
5173         ++BUILD_STAT_NAME(MallocCalls, Function);
5174   }
5175 };
5176 
5177 /// ----------------------- Privatizable Pointers ------------------------------
5178 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5179   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5180       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5181 
5182   ChangeStatus indicatePessimisticFixpoint() override {
5183     AAPrivatizablePtr::indicatePessimisticFixpoint();
5184     PrivatizableType = nullptr;
5185     return ChangeStatus::CHANGED;
5186   }
5187 
5188   /// Identify the type we can chose for a private copy of the underlying
5189   /// argument. None means it is not clear yet, nullptr means there is none.
5190   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5191 
5192   /// Return a privatizable type that encloses both T0 and T1.
5193   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5194   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5195     if (!T0.hasValue())
5196       return T1;
5197     if (!T1.hasValue())
5198       return T0;
5199     if (T0 == T1)
5200       return T0;
5201     return nullptr;
5202   }
5203 
5204   Optional<Type *> getPrivatizableType() const override {
5205     return PrivatizableType;
5206   }
5207 
5208   const std::string getAsStr() const override {
5209     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5210   }
5211 
5212 protected:
5213   Optional<Type *> PrivatizableType;
5214 };
5215 
5216 // TODO: Do this for call site arguments (probably also other values) as well.
5217 
5218 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5219   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5220       : AAPrivatizablePtrImpl(IRP, A) {}
5221 
5222   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5223   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5224     // If this is a byval argument and we know all the call sites (so we can
5225     // rewrite them), there is no need to check them explicitly.
5226     bool AllCallSitesKnown;
5227     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5228         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5229                                true, AllCallSitesKnown))
5230       return getAssociatedValue().getType()->getPointerElementType();
5231 
5232     Optional<Type *> Ty;
5233     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5234 
5235     // Make sure the associated call site argument has the same type at all call
5236     // sites and it is an allocation we know is safe to privatize, for now that
5237     // means we only allow alloca instructions.
5238     // TODO: We can additionally analyze the accesses in the callee to  create
5239     //       the type from that information instead. That is a little more
5240     //       involved and will be done in a follow up patch.
5241     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5242       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5243       // Check if a coresponding argument was found or if it is one not
5244       // associated (which can happen for callback calls).
5245       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5246         return false;
5247 
5248       // Check that all call sites agree on a type.
5249       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
5250       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5251 
5252       LLVM_DEBUG({
5253         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5254         if (CSTy.hasValue() && CSTy.getValue())
5255           CSTy.getValue()->print(dbgs());
5256         else if (CSTy.hasValue())
5257           dbgs() << "<nullptr>";
5258         else
5259           dbgs() << "<none>";
5260       });
5261 
5262       Ty = combineTypes(Ty, CSTy);
5263 
5264       LLVM_DEBUG({
5265         dbgs() << " : New Type: ";
5266         if (Ty.hasValue() && Ty.getValue())
5267           Ty.getValue()->print(dbgs());
5268         else if (Ty.hasValue())
5269           dbgs() << "<nullptr>";
5270         else
5271           dbgs() << "<none>";
5272         dbgs() << "\n";
5273       });
5274 
5275       return !Ty.hasValue() || Ty.getValue();
5276     };
5277 
5278     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5279       return nullptr;
5280     return Ty;
5281   }
5282 
5283   /// See AbstractAttribute::updateImpl(...).
5284   ChangeStatus updateImpl(Attributor &A) override {
5285     PrivatizableType = identifyPrivatizableType(A);
5286     if (!PrivatizableType.hasValue())
5287       return ChangeStatus::UNCHANGED;
5288     if (!PrivatizableType.getValue())
5289       return indicatePessimisticFixpoint();
5290 
5291     // The dependence is optional so we don't give up once we give up on the
5292     // alignment.
5293     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5294                         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5295 
5296     // Avoid arguments with padding for now.
5297     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5298         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5299                                                 A.getInfoCache().getDL())) {
5300       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5301       return indicatePessimisticFixpoint();
5302     }
5303 
5304     // Verify callee and caller agree on how the promoted argument would be
5305     // passed.
5306     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5307     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5308     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5309     Function &Fn = *getIRPosition().getAnchorScope();
5310     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5311     ArgsToPromote.insert(getAssociatedArgument());
5312     const auto *TTI =
5313         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5314     if (!TTI ||
5315         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5316             Fn, *TTI, ArgsToPromote, Dummy) ||
5317         ArgsToPromote.empty()) {
5318       LLVM_DEBUG(
5319           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5320                  << Fn.getName() << "\n");
5321       return indicatePessimisticFixpoint();
5322     }
5323 
5324     // Collect the types that will replace the privatizable type in the function
5325     // signature.
5326     SmallVector<Type *, 16> ReplacementTypes;
5327     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5328 
5329     // Register a rewrite of the argument.
5330     Argument *Arg = getAssociatedArgument();
5331     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5332       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5333       return indicatePessimisticFixpoint();
5334     }
5335 
5336     unsigned ArgNo = Arg->getArgNo();
5337 
5338     // Helper to check if for the given call site the associated argument is
5339     // passed to a callback where the privatization would be different.
5340     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5341       SmallVector<const Use *, 4> CallbackUses;
5342       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5343       for (const Use *U : CallbackUses) {
5344         AbstractCallSite CBACS(U);
5345         assert(CBACS && CBACS.isCallbackCall());
5346         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5347           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5348 
5349           LLVM_DEBUG({
5350             dbgs()
5351                 << "[AAPrivatizablePtr] Argument " << *Arg
5352                 << "check if can be privatized in the context of its parent ("
5353                 << Arg->getParent()->getName()
5354                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5355                    "callback ("
5356                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5357                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5358                 << CBACS.getCallArgOperand(CBArg) << " vs "
5359                 << CB.getArgOperand(ArgNo) << "\n"
5360                 << "[AAPrivatizablePtr] " << CBArg << " : "
5361                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5362           });
5363 
5364           if (CBArgNo != int(ArgNo))
5365             continue;
5366           const auto &CBArgPrivAA =
5367               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5368           if (CBArgPrivAA.isValidState()) {
5369             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5370             if (!CBArgPrivTy.hasValue())
5371               continue;
5372             if (CBArgPrivTy.getValue() == PrivatizableType)
5373               continue;
5374           }
5375 
5376           LLVM_DEBUG({
5377             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5378                    << " cannot be privatized in the context of its parent ("
5379                    << Arg->getParent()->getName()
5380                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5381                       "callback ("
5382                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5383                    << ").\n[AAPrivatizablePtr] for which the argument "
5384                       "privatization is not compatible.\n";
5385           });
5386           return false;
5387         }
5388       }
5389       return true;
5390     };
5391 
5392     // Helper to check if for the given call site the associated argument is
5393     // passed to a direct call where the privatization would be different.
5394     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5395       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5396       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5397       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5398              "Expected a direct call operand for callback call operand");
5399 
5400       LLVM_DEBUG({
5401         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5402                << " check if be privatized in the context of its parent ("
5403                << Arg->getParent()->getName()
5404                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5405                   "direct call of ("
5406                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5407                << ").\n";
5408       });
5409 
5410       Function *DCCallee = DC->getCalledFunction();
5411       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5412         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5413             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5414         if (DCArgPrivAA.isValidState()) {
5415           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5416           if (!DCArgPrivTy.hasValue())
5417             return true;
5418           if (DCArgPrivTy.getValue() == PrivatizableType)
5419             return true;
5420         }
5421       }
5422 
5423       LLVM_DEBUG({
5424         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5425                << " cannot be privatized in the context of its parent ("
5426                << Arg->getParent()->getName()
5427                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5428                   "direct call of ("
5429                << ACS.getInstruction()->getCalledFunction()->getName()
5430                << ").\n[AAPrivatizablePtr] for which the argument "
5431                   "privatization is not compatible.\n";
5432       });
5433       return false;
5434     };
5435 
5436     // Helper to check if the associated argument is used at the given abstract
5437     // call site in a way that is incompatible with the privatization assumed
5438     // here.
5439     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5440       if (ACS.isDirectCall())
5441         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5442       if (ACS.isCallbackCall())
5443         return IsCompatiblePrivArgOfDirectCS(ACS);
5444       return false;
5445     };
5446 
5447     bool AllCallSitesKnown;
5448     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5449                                 AllCallSitesKnown))
5450       return indicatePessimisticFixpoint();
5451 
5452     return ChangeStatus::UNCHANGED;
5453   }
5454 
5455   /// Given a type to private \p PrivType, collect the constituates (which are
5456   /// used) in \p ReplacementTypes.
5457   static void
5458   identifyReplacementTypes(Type *PrivType,
5459                            SmallVectorImpl<Type *> &ReplacementTypes) {
5460     // TODO: For now we expand the privatization type to the fullest which can
5461     //       lead to dead arguments that need to be removed later.
5462     assert(PrivType && "Expected privatizable type!");
5463 
5464     // Traverse the type, extract constituate types on the outermost level.
5465     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5466       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5467         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5468     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5469       ReplacementTypes.append(PrivArrayType->getNumElements(),
5470                               PrivArrayType->getElementType());
5471     } else {
5472       ReplacementTypes.push_back(PrivType);
5473     }
5474   }
5475 
5476   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5477   /// The values needed are taken from the arguments of \p F starting at
5478   /// position \p ArgNo.
5479   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5480                                    unsigned ArgNo, Instruction &IP) {
5481     assert(PrivType && "Expected privatizable type!");
5482 
5483     IRBuilder<NoFolder> IRB(&IP);
5484     const DataLayout &DL = F.getParent()->getDataLayout();
5485 
5486     // Traverse the type, build GEPs and stores.
5487     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5488       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5489       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5490         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5491         Value *Ptr = constructPointer(
5492             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5493         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5494       }
5495     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5496       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5497       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5498       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5499         Value *Ptr =
5500             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5501         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5502       }
5503     } else {
5504       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5505     }
5506   }
5507 
5508   /// Extract values from \p Base according to the type \p PrivType at the
5509   /// call position \p ACS. The values are appended to \p ReplacementValues.
5510   void createReplacementValues(Align Alignment, Type *PrivType,
5511                                AbstractCallSite ACS, Value *Base,
5512                                SmallVectorImpl<Value *> &ReplacementValues) {
5513     assert(Base && "Expected base value!");
5514     assert(PrivType && "Expected privatizable type!");
5515     Instruction *IP = ACS.getInstruction();
5516 
5517     IRBuilder<NoFolder> IRB(IP);
5518     const DataLayout &DL = IP->getModule()->getDataLayout();
5519 
5520     if (Base->getType()->getPointerElementType() != PrivType)
5521       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5522                                                  "", ACS.getInstruction());
5523 
5524     // Traverse the type, build GEPs and loads.
5525     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5526       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5527       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5528         Type *PointeeTy = PrivStructType->getElementType(u);
5529         Value *Ptr =
5530             constructPointer(PointeeTy->getPointerTo(), Base,
5531                              PrivStructLayout->getElementOffset(u), IRB, DL);
5532         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5533         L->setAlignment(Alignment);
5534         ReplacementValues.push_back(L);
5535       }
5536     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5537       Type *PointeeTy = PrivArrayType->getElementType();
5538       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5539       Type *PointeePtrTy = PointeeTy->getPointerTo();
5540       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5541         Value *Ptr =
5542             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5543         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5544         L->setAlignment(Alignment);
5545         ReplacementValues.push_back(L);
5546       }
5547     } else {
5548       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5549       L->setAlignment(Alignment);
5550       ReplacementValues.push_back(L);
5551     }
5552   }
5553 
5554   /// See AbstractAttribute::manifest(...)
5555   ChangeStatus manifest(Attributor &A) override {
5556     if (!PrivatizableType.hasValue())
5557       return ChangeStatus::UNCHANGED;
5558     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5559 
5560     // Collect all tail calls in the function as we cannot allow new allocas to
5561     // escape into tail recursion.
5562     // TODO: Be smarter about new allocas escaping into tail calls.
5563     SmallVector<CallInst *, 16> TailCalls;
5564     if (!A.checkForAllInstructions(
5565             [&](Instruction &I) {
5566               CallInst &CI = cast<CallInst>(I);
5567               if (CI.isTailCall())
5568                 TailCalls.push_back(&CI);
5569               return true;
5570             },
5571             *this, {Instruction::Call}))
5572       return ChangeStatus::UNCHANGED;
5573 
5574     Argument *Arg = getAssociatedArgument();
5575     // Query AAAlign attribute for alignment of associated argument to
5576     // determine the best alignment of loads.
5577     const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg));
5578 
5579     // Callback to repair the associated function. A new alloca is placed at the
5580     // beginning and initialized with the values passed through arguments. The
5581     // new alloca replaces the use of the old pointer argument.
5582     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5583         [=](const Attributor::ArgumentReplacementInfo &ARI,
5584             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5585           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5586           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5587           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5588                                     Arg->getName() + ".priv", IP);
5589           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5590                                ArgIt->getArgNo(), *IP);
5591           Arg->replaceAllUsesWith(AI);
5592 
5593           for (CallInst *CI : TailCalls)
5594             CI->setTailCall(false);
5595         };
5596 
5597     // Callback to repair a call site of the associated function. The elements
5598     // of the privatizable type are loaded prior to the call and passed to the
5599     // new function version.
5600     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5601         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5602                       AbstractCallSite ACS,
5603                       SmallVectorImpl<Value *> &NewArgOperands) {
5604           // When no alignment is specified for the load instruction,
5605           // natural alignment is assumed.
5606           createReplacementValues(
5607               assumeAligned(AlignAA.getAssumedAlign()),
5608               PrivatizableType.getValue(), ACS,
5609               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5610               NewArgOperands);
5611         };
5612 
5613     // Collect the types that will replace the privatizable type in the function
5614     // signature.
5615     SmallVector<Type *, 16> ReplacementTypes;
5616     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5617 
5618     // Register a rewrite of the argument.
5619     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5620                                            std::move(FnRepairCB),
5621                                            std::move(ACSRepairCB)))
5622       return ChangeStatus::CHANGED;
5623     return ChangeStatus::UNCHANGED;
5624   }
5625 
5626   /// See AbstractAttribute::trackStatistics()
5627   void trackStatistics() const override {
5628     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5629   }
5630 };
5631 
5632 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5633   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5634       : AAPrivatizablePtrImpl(IRP, A) {}
5635 
5636   /// See AbstractAttribute::initialize(...).
5637   virtual void initialize(Attributor &A) override {
5638     // TODO: We can privatize more than arguments.
5639     indicatePessimisticFixpoint();
5640   }
5641 
5642   ChangeStatus updateImpl(Attributor &A) override {
5643     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5644                      "updateImpl will not be called");
5645   }
5646 
5647   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5648   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5649     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5650     if (!Obj) {
5651       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5652       return nullptr;
5653     }
5654 
5655     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5656       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5657         if (CI->isOne())
5658           return Obj->getType()->getPointerElementType();
5659     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5660       auto &PrivArgAA =
5661           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5662       if (PrivArgAA.isAssumedPrivatizablePtr())
5663         return Obj->getType()->getPointerElementType();
5664     }
5665 
5666     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5667                          "alloca nor privatizable argument: "
5668                       << *Obj << "!\n");
5669     return nullptr;
5670   }
5671 
5672   /// See AbstractAttribute::trackStatistics()
5673   void trackStatistics() const override {
5674     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5675   }
5676 };
5677 
5678 struct AAPrivatizablePtrCallSiteArgument final
5679     : public AAPrivatizablePtrFloating {
5680   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5681       : AAPrivatizablePtrFloating(IRP, A) {}
5682 
5683   /// See AbstractAttribute::initialize(...).
5684   void initialize(Attributor &A) override {
5685     if (getIRPosition().hasAttr(Attribute::ByVal))
5686       indicateOptimisticFixpoint();
5687   }
5688 
5689   /// See AbstractAttribute::updateImpl(...).
5690   ChangeStatus updateImpl(Attributor &A) override {
5691     PrivatizableType = identifyPrivatizableType(A);
5692     if (!PrivatizableType.hasValue())
5693       return ChangeStatus::UNCHANGED;
5694     if (!PrivatizableType.getValue())
5695       return indicatePessimisticFixpoint();
5696 
5697     const IRPosition &IRP = getIRPosition();
5698     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5699     if (!NoCaptureAA.isAssumedNoCapture()) {
5700       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5701       return indicatePessimisticFixpoint();
5702     }
5703 
5704     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5705     if (!NoAliasAA.isAssumedNoAlias()) {
5706       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5707       return indicatePessimisticFixpoint();
5708     }
5709 
5710     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5711     if (!MemBehaviorAA.isAssumedReadOnly()) {
5712       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5713       return indicatePessimisticFixpoint();
5714     }
5715 
5716     return ChangeStatus::UNCHANGED;
5717   }
5718 
5719   /// See AbstractAttribute::trackStatistics()
5720   void trackStatistics() const override {
5721     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5722   }
5723 };
5724 
5725 struct AAPrivatizablePtrCallSiteReturned final
5726     : public AAPrivatizablePtrFloating {
5727   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5728       : AAPrivatizablePtrFloating(IRP, A) {}
5729 
5730   /// See AbstractAttribute::initialize(...).
5731   void initialize(Attributor &A) override {
5732     // TODO: We can privatize more than arguments.
5733     indicatePessimisticFixpoint();
5734   }
5735 
5736   /// See AbstractAttribute::trackStatistics()
5737   void trackStatistics() const override {
5738     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5739   }
5740 };
5741 
5742 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5743   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5744       : AAPrivatizablePtrFloating(IRP, A) {}
5745 
5746   /// See AbstractAttribute::initialize(...).
5747   void initialize(Attributor &A) override {
5748     // TODO: We can privatize more than arguments.
5749     indicatePessimisticFixpoint();
5750   }
5751 
5752   /// See AbstractAttribute::trackStatistics()
5753   void trackStatistics() const override {
5754     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5755   }
5756 };
5757 
5758 /// -------------------- Memory Behavior Attributes ----------------------------
5759 /// Includes read-none, read-only, and write-only.
5760 /// ----------------------------------------------------------------------------
5761 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5762   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5763       : AAMemoryBehavior(IRP, A) {}
5764 
5765   /// See AbstractAttribute::initialize(...).
5766   void initialize(Attributor &A) override {
5767     intersectAssumedBits(BEST_STATE);
5768     getKnownStateFromValue(getIRPosition(), getState());
5769     AAMemoryBehavior::initialize(A);
5770   }
5771 
5772   /// Return the memory behavior information encoded in the IR for \p IRP.
5773   static void getKnownStateFromValue(const IRPosition &IRP,
5774                                      BitIntegerState &State,
5775                                      bool IgnoreSubsumingPositions = false) {
5776     SmallVector<Attribute, 2> Attrs;
5777     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5778     for (const Attribute &Attr : Attrs) {
5779       switch (Attr.getKindAsEnum()) {
5780       case Attribute::ReadNone:
5781         State.addKnownBits(NO_ACCESSES);
5782         break;
5783       case Attribute::ReadOnly:
5784         State.addKnownBits(NO_WRITES);
5785         break;
5786       case Attribute::WriteOnly:
5787         State.addKnownBits(NO_READS);
5788         break;
5789       default:
5790         llvm_unreachable("Unexpected attribute!");
5791       }
5792     }
5793 
5794     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5795       if (!I->mayReadFromMemory())
5796         State.addKnownBits(NO_READS);
5797       if (!I->mayWriteToMemory())
5798         State.addKnownBits(NO_WRITES);
5799     }
5800   }
5801 
5802   /// See AbstractAttribute::getDeducedAttributes(...).
5803   void getDeducedAttributes(LLVMContext &Ctx,
5804                             SmallVectorImpl<Attribute> &Attrs) const override {
5805     assert(Attrs.size() == 0);
5806     if (isAssumedReadNone())
5807       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5808     else if (isAssumedReadOnly())
5809       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5810     else if (isAssumedWriteOnly())
5811       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5812     assert(Attrs.size() <= 1);
5813   }
5814 
5815   /// See AbstractAttribute::manifest(...).
5816   ChangeStatus manifest(Attributor &A) override {
5817     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5818       return ChangeStatus::UNCHANGED;
5819 
5820     const IRPosition &IRP = getIRPosition();
5821 
5822     // Check if we would improve the existing attributes first.
5823     SmallVector<Attribute, 4> DeducedAttrs;
5824     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5825     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5826           return IRP.hasAttr(Attr.getKindAsEnum(),
5827                              /* IgnoreSubsumingPositions */ true);
5828         }))
5829       return ChangeStatus::UNCHANGED;
5830 
5831     // Clear existing attributes.
5832     IRP.removeAttrs(AttrKinds);
5833 
5834     // Use the generic manifest method.
5835     return IRAttribute::manifest(A);
5836   }
5837 
5838   /// See AbstractState::getAsStr().
5839   const std::string getAsStr() const override {
5840     if (isAssumedReadNone())
5841       return "readnone";
5842     if (isAssumedReadOnly())
5843       return "readonly";
5844     if (isAssumedWriteOnly())
5845       return "writeonly";
5846     return "may-read/write";
5847   }
5848 
5849   /// The set of IR attributes AAMemoryBehavior deals with.
5850   static const Attribute::AttrKind AttrKinds[3];
5851 };
5852 
5853 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5854     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5855 
5856 /// Memory behavior attribute for a floating value.
5857 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5858   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5859       : AAMemoryBehaviorImpl(IRP, A) {}
5860 
5861   /// See AbstractAttribute::initialize(...).
5862   void initialize(Attributor &A) override {
5863     AAMemoryBehaviorImpl::initialize(A);
5864     // Initialize the use vector with all direct uses of the associated value.
5865     for (const Use &U : getAssociatedValue().uses())
5866       Uses.insert(&U);
5867   }
5868 
5869   /// See AbstractAttribute::updateImpl(...).
5870   ChangeStatus updateImpl(Attributor &A) override;
5871 
5872   /// See AbstractAttribute::trackStatistics()
5873   void trackStatistics() const override {
5874     if (isAssumedReadNone())
5875       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5876     else if (isAssumedReadOnly())
5877       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5878     else if (isAssumedWriteOnly())
5879       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5880   }
5881 
5882 private:
5883   /// Return true if users of \p UserI might access the underlying
5884   /// variable/location described by \p U and should therefore be analyzed.
5885   bool followUsersOfUseIn(Attributor &A, const Use *U,
5886                           const Instruction *UserI);
5887 
5888   /// Update the state according to the effect of use \p U in \p UserI.
5889   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5890 
5891 protected:
5892   /// Container for (transitive) uses of the associated argument.
5893   SetVector<const Use *> Uses;
5894 };
5895 
5896 /// Memory behavior attribute for function argument.
5897 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5898   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5899       : AAMemoryBehaviorFloating(IRP, A) {}
5900 
5901   /// See AbstractAttribute::initialize(...).
5902   void initialize(Attributor &A) override {
5903     intersectAssumedBits(BEST_STATE);
5904     const IRPosition &IRP = getIRPosition();
5905     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5906     // can query it when we use has/getAttr. That would allow us to reuse the
5907     // initialize of the base class here.
5908     bool HasByVal =
5909         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5910     getKnownStateFromValue(IRP, getState(),
5911                            /* IgnoreSubsumingPositions */ HasByVal);
5912 
5913     // Initialize the use vector with all direct uses of the associated value.
5914     Argument *Arg = getAssociatedArgument();
5915     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5916       indicatePessimisticFixpoint();
5917     } else {
5918       // Initialize the use vector with all direct uses of the associated value.
5919       for (const Use &U : Arg->uses())
5920         Uses.insert(&U);
5921     }
5922   }
5923 
5924   ChangeStatus manifest(Attributor &A) override {
5925     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5926     if (!getAssociatedValue().getType()->isPointerTy())
5927       return ChangeStatus::UNCHANGED;
5928 
5929     // TODO: From readattrs.ll: "inalloca parameters are always
5930     //                           considered written"
5931     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
5932       removeKnownBits(NO_WRITES);
5933       removeAssumedBits(NO_WRITES);
5934     }
5935     return AAMemoryBehaviorFloating::manifest(A);
5936   }
5937 
5938   /// See AbstractAttribute::trackStatistics()
5939   void trackStatistics() const override {
5940     if (isAssumedReadNone())
5941       STATS_DECLTRACK_ARG_ATTR(readnone)
5942     else if (isAssumedReadOnly())
5943       STATS_DECLTRACK_ARG_ATTR(readonly)
5944     else if (isAssumedWriteOnly())
5945       STATS_DECLTRACK_ARG_ATTR(writeonly)
5946   }
5947 };
5948 
5949 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5950   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5951       : AAMemoryBehaviorArgument(IRP, A) {}
5952 
5953   /// See AbstractAttribute::initialize(...).
5954   void initialize(Attributor &A) override {
5955     // If we don't have an associated attribute this is either a variadic call
5956     // or an indirect call, either way, nothing to do here.
5957     Argument *Arg = getAssociatedArgument();
5958     if (!Arg) {
5959       indicatePessimisticFixpoint();
5960       return;
5961     }
5962     if (Arg->hasByValAttr()) {
5963       addKnownBits(NO_WRITES);
5964       removeKnownBits(NO_READS);
5965       removeAssumedBits(NO_READS);
5966     }
5967     AAMemoryBehaviorArgument::initialize(A);
5968     if (getAssociatedFunction()->isDeclaration())
5969       indicatePessimisticFixpoint();
5970   }
5971 
5972   /// See AbstractAttribute::updateImpl(...).
5973   ChangeStatus updateImpl(Attributor &A) override {
5974     // TODO: Once we have call site specific value information we can provide
5975     //       call site specific liveness liveness information and then it makes
5976     //       sense to specialize attributes for call sites arguments instead of
5977     //       redirecting requests to the callee argument.
5978     Argument *Arg = getAssociatedArgument();
5979     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5980     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5981     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5982   }
5983 
5984   /// See AbstractAttribute::trackStatistics()
5985   void trackStatistics() const override {
5986     if (isAssumedReadNone())
5987       STATS_DECLTRACK_CSARG_ATTR(readnone)
5988     else if (isAssumedReadOnly())
5989       STATS_DECLTRACK_CSARG_ATTR(readonly)
5990     else if (isAssumedWriteOnly())
5991       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5992   }
5993 };
5994 
5995 /// Memory behavior attribute for a call site return position.
5996 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5997   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
5998       : AAMemoryBehaviorFloating(IRP, A) {}
5999 
6000   /// See AbstractAttribute::initialize(...).
6001   void initialize(Attributor &A) override {
6002     AAMemoryBehaviorImpl::initialize(A);
6003     Function *F = getAssociatedFunction();
6004     if (!F || F->isDeclaration())
6005       indicatePessimisticFixpoint();
6006   }
6007 
6008   /// See AbstractAttribute::manifest(...).
6009   ChangeStatus manifest(Attributor &A) override {
6010     // We do not annotate returned values.
6011     return ChangeStatus::UNCHANGED;
6012   }
6013 
6014   /// See AbstractAttribute::trackStatistics()
6015   void trackStatistics() const override {}
6016 };
6017 
6018 /// An AA to represent the memory behavior function attributes.
6019 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6020   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6021       : AAMemoryBehaviorImpl(IRP, A) {}
6022 
6023   /// See AbstractAttribute::updateImpl(Attributor &A).
6024   virtual ChangeStatus updateImpl(Attributor &A) override;
6025 
6026   /// See AbstractAttribute::manifest(...).
6027   ChangeStatus manifest(Attributor &A) override {
6028     Function &F = cast<Function>(getAnchorValue());
6029     if (isAssumedReadNone()) {
6030       F.removeFnAttr(Attribute::ArgMemOnly);
6031       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6032       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6033     }
6034     return AAMemoryBehaviorImpl::manifest(A);
6035   }
6036 
6037   /// See AbstractAttribute::trackStatistics()
6038   void trackStatistics() const override {
6039     if (isAssumedReadNone())
6040       STATS_DECLTRACK_FN_ATTR(readnone)
6041     else if (isAssumedReadOnly())
6042       STATS_DECLTRACK_FN_ATTR(readonly)
6043     else if (isAssumedWriteOnly())
6044       STATS_DECLTRACK_FN_ATTR(writeonly)
6045   }
6046 };
6047 
6048 /// AAMemoryBehavior attribute for call sites.
6049 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6050   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6051       : AAMemoryBehaviorImpl(IRP, A) {}
6052 
6053   /// See AbstractAttribute::initialize(...).
6054   void initialize(Attributor &A) override {
6055     AAMemoryBehaviorImpl::initialize(A);
6056     Function *F = getAssociatedFunction();
6057     if (!F || F->isDeclaration())
6058       indicatePessimisticFixpoint();
6059   }
6060 
6061   /// See AbstractAttribute::updateImpl(...).
6062   ChangeStatus updateImpl(Attributor &A) override {
6063     // TODO: Once we have call site specific value information we can provide
6064     //       call site specific liveness liveness information and then it makes
6065     //       sense to specialize attributes for call sites arguments instead of
6066     //       redirecting requests to the callee argument.
6067     Function *F = getAssociatedFunction();
6068     const IRPosition &FnPos = IRPosition::function(*F);
6069     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
6070     return clampStateAndIndicateChange(getState(), FnAA.getState());
6071   }
6072 
6073   /// See AbstractAttribute::trackStatistics()
6074   void trackStatistics() const override {
6075     if (isAssumedReadNone())
6076       STATS_DECLTRACK_CS_ATTR(readnone)
6077     else if (isAssumedReadOnly())
6078       STATS_DECLTRACK_CS_ATTR(readonly)
6079     else if (isAssumedWriteOnly())
6080       STATS_DECLTRACK_CS_ATTR(writeonly)
6081   }
6082 };
6083 
6084 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6085 
6086   // The current assumed state used to determine a change.
6087   auto AssumedState = getAssumed();
6088 
6089   auto CheckRWInst = [&](Instruction &I) {
6090     // If the instruction has an own memory behavior state, use it to restrict
6091     // the local state. No further analysis is required as the other memory
6092     // state is as optimistic as it gets.
6093     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6094       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6095           *this, IRPosition::callsite_function(*CB));
6096       intersectAssumedBits(MemBehaviorAA.getAssumed());
6097       return !isAtFixpoint();
6098     }
6099 
6100     // Remove access kind modifiers if necessary.
6101     if (I.mayReadFromMemory())
6102       removeAssumedBits(NO_READS);
6103     if (I.mayWriteToMemory())
6104       removeAssumedBits(NO_WRITES);
6105     return !isAtFixpoint();
6106   };
6107 
6108   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6109     return indicatePessimisticFixpoint();
6110 
6111   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6112                                         : ChangeStatus::UNCHANGED;
6113 }
6114 
6115 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6116 
6117   const IRPosition &IRP = getIRPosition();
6118   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6119   AAMemoryBehavior::StateType &S = getState();
6120 
6121   // First, check the function scope. We take the known information and we avoid
6122   // work if the assumed information implies the current assumed information for
6123   // this attribute. This is a valid for all but byval arguments.
6124   Argument *Arg = IRP.getAssociatedArgument();
6125   AAMemoryBehavior::base_t FnMemAssumedState =
6126       AAMemoryBehavior::StateType::getWorstState();
6127   if (!Arg || !Arg->hasByValAttr()) {
6128     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
6129         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6130     FnMemAssumedState = FnMemAA.getAssumed();
6131     S.addKnownBits(FnMemAA.getKnown());
6132     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6133       return ChangeStatus::UNCHANGED;
6134   }
6135 
6136   // Make sure the value is not captured (except through "return"), if
6137   // it is, any information derived would be irrelevant anyway as we cannot
6138   // check the potential aliases introduced by the capture. However, no need
6139   // to fall back to anythign less optimistic than the function state.
6140   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6141       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6142   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6143     S.intersectAssumedBits(FnMemAssumedState);
6144     return ChangeStatus::CHANGED;
6145   }
6146 
6147   // The current assumed state used to determine a change.
6148   auto AssumedState = S.getAssumed();
6149 
6150   // Liveness information to exclude dead users.
6151   // TODO: Take the FnPos once we have call site specific liveness information.
6152   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6153       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6154       /* TrackDependence */ false);
6155 
6156   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6157   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6158     const Use *U = Uses[i];
6159     Instruction *UserI = cast<Instruction>(U->getUser());
6160     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6161                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6162                       << "]\n");
6163     if (A.isAssumedDead(*U, this, &LivenessAA))
6164       continue;
6165 
6166     // Droppable users, e.g., llvm::assume does not actually perform any action.
6167     if (UserI->isDroppable())
6168       continue;
6169 
6170     // Check if the users of UserI should also be visited.
6171     if (followUsersOfUseIn(A, U, UserI))
6172       for (const Use &UserIUse : UserI->uses())
6173         Uses.insert(&UserIUse);
6174 
6175     // If UserI might touch memory we analyze the use in detail.
6176     if (UserI->mayReadOrWriteMemory())
6177       analyzeUseIn(A, U, UserI);
6178   }
6179 
6180   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6181                                         : ChangeStatus::UNCHANGED;
6182 }
6183 
6184 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6185                                                   const Instruction *UserI) {
6186   // The loaded value is unrelated to the pointer argument, no need to
6187   // follow the users of the load.
6188   if (isa<LoadInst>(UserI))
6189     return false;
6190 
6191   // By default we follow all uses assuming UserI might leak information on U,
6192   // we have special handling for call sites operands though.
6193   const auto *CB = dyn_cast<CallBase>(UserI);
6194   if (!CB || !CB->isArgOperand(U))
6195     return true;
6196 
6197   // If the use is a call argument known not to be captured, the users of
6198   // the call do not need to be visited because they have to be unrelated to
6199   // the input. Note that this check is not trivial even though we disallow
6200   // general capturing of the underlying argument. The reason is that the
6201   // call might the argument "through return", which we allow and for which we
6202   // need to check call users.
6203   if (U->get()->getType()->isPointerTy()) {
6204     unsigned ArgNo = CB->getArgOperandNo(U);
6205     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6206         *this, IRPosition::callsite_argument(*CB, ArgNo),
6207         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6208     return !ArgNoCaptureAA.isAssumedNoCapture();
6209   }
6210 
6211   return true;
6212 }
6213 
6214 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6215                                             const Instruction *UserI) {
6216   assert(UserI->mayReadOrWriteMemory());
6217 
6218   switch (UserI->getOpcode()) {
6219   default:
6220     // TODO: Handle all atomics and other side-effect operations we know of.
6221     break;
6222   case Instruction::Load:
6223     // Loads cause the NO_READS property to disappear.
6224     removeAssumedBits(NO_READS);
6225     return;
6226 
6227   case Instruction::Store:
6228     // Stores cause the NO_WRITES property to disappear if the use is the
6229     // pointer operand. Note that we do assume that capturing was taken care of
6230     // somewhere else.
6231     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6232       removeAssumedBits(NO_WRITES);
6233     return;
6234 
6235   case Instruction::Call:
6236   case Instruction::CallBr:
6237   case Instruction::Invoke: {
6238     // For call sites we look at the argument memory behavior attribute (this
6239     // could be recursive!) in order to restrict our own state.
6240     const auto *CB = cast<CallBase>(UserI);
6241 
6242     // Give up on operand bundles.
6243     if (CB->isBundleOperand(U)) {
6244       indicatePessimisticFixpoint();
6245       return;
6246     }
6247 
6248     // Calling a function does read the function pointer, maybe write it if the
6249     // function is self-modifying.
6250     if (CB->isCallee(U)) {
6251       removeAssumedBits(NO_READS);
6252       break;
6253     }
6254 
6255     // Adjust the possible access behavior based on the information on the
6256     // argument.
6257     IRPosition Pos;
6258     if (U->get()->getType()->isPointerTy())
6259       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6260     else
6261       Pos = IRPosition::callsite_function(*CB);
6262     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6263         *this, Pos,
6264         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6265     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6266     // and at least "known".
6267     intersectAssumedBits(MemBehaviorAA.getAssumed());
6268     return;
6269   }
6270   };
6271 
6272   // Generally, look at the "may-properties" and adjust the assumed state if we
6273   // did not trigger special handling before.
6274   if (UserI->mayReadFromMemory())
6275     removeAssumedBits(NO_READS);
6276   if (UserI->mayWriteToMemory())
6277     removeAssumedBits(NO_WRITES);
6278 }
6279 
6280 } // namespace
6281 
6282 /// -------------------- Memory Locations Attributes ---------------------------
6283 /// Includes read-none, argmemonly, inaccessiblememonly,
6284 /// inaccessiblememorargmemonly
6285 /// ----------------------------------------------------------------------------
6286 
6287 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6288     AAMemoryLocation::MemoryLocationsKind MLK) {
6289   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6290     return "all memory";
6291   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6292     return "no memory";
6293   std::string S = "memory:";
6294   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6295     S += "stack,";
6296   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6297     S += "constant,";
6298   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6299     S += "internal global,";
6300   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6301     S += "external global,";
6302   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6303     S += "argument,";
6304   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6305     S += "inaccessible,";
6306   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6307     S += "malloced,";
6308   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6309     S += "unknown,";
6310   S.pop_back();
6311   return S;
6312 }
6313 
6314 namespace {
6315 struct AAMemoryLocationImpl : public AAMemoryLocation {
6316 
6317   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6318       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6319     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6320       AccessKind2Accesses[u] = nullptr;
6321   }
6322 
6323   ~AAMemoryLocationImpl() {
6324     // The AccessSets are allocated via a BumpPtrAllocator, we call
6325     // the destructor manually.
6326     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6327       if (AccessKind2Accesses[u])
6328         AccessKind2Accesses[u]->~AccessSet();
6329   }
6330 
6331   /// See AbstractAttribute::initialize(...).
6332   void initialize(Attributor &A) override {
6333     intersectAssumedBits(BEST_STATE);
6334     getKnownStateFromValue(A, getIRPosition(), getState());
6335     AAMemoryLocation::initialize(A);
6336   }
6337 
6338   /// Return the memory behavior information encoded in the IR for \p IRP.
6339   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6340                                      BitIntegerState &State,
6341                                      bool IgnoreSubsumingPositions = false) {
6342     // For internal functions we ignore `argmemonly` and
6343     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6344     // constant propagation. It is unclear if this is the best way but it is
6345     // unlikely this will cause real performance problems. If we are deriving
6346     // attributes for the anchor function we even remove the attribute in
6347     // addition to ignoring it.
6348     bool UseArgMemOnly = true;
6349     Function *AnchorFn = IRP.getAnchorScope();
6350     if (AnchorFn && A.isRunOn(*AnchorFn))
6351       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6352 
6353     SmallVector<Attribute, 2> Attrs;
6354     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6355     for (const Attribute &Attr : Attrs) {
6356       switch (Attr.getKindAsEnum()) {
6357       case Attribute::ReadNone:
6358         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6359         break;
6360       case Attribute::InaccessibleMemOnly:
6361         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6362         break;
6363       case Attribute::ArgMemOnly:
6364         if (UseArgMemOnly)
6365           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6366         else
6367           IRP.removeAttrs({Attribute::ArgMemOnly});
6368         break;
6369       case Attribute::InaccessibleMemOrArgMemOnly:
6370         if (UseArgMemOnly)
6371           State.addKnownBits(inverseLocation(
6372               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6373         else
6374           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6375         break;
6376       default:
6377         llvm_unreachable("Unexpected attribute!");
6378       }
6379     }
6380   }
6381 
6382   /// See AbstractAttribute::getDeducedAttributes(...).
6383   void getDeducedAttributes(LLVMContext &Ctx,
6384                             SmallVectorImpl<Attribute> &Attrs) const override {
6385     assert(Attrs.size() == 0);
6386     if (isAssumedReadNone()) {
6387       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6388     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6389       if (isAssumedInaccessibleMemOnly())
6390         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6391       else if (isAssumedArgMemOnly())
6392         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6393       else if (isAssumedInaccessibleOrArgMemOnly())
6394         Attrs.push_back(
6395             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6396     }
6397     assert(Attrs.size() <= 1);
6398   }
6399 
6400   /// See AbstractAttribute::manifest(...).
6401   ChangeStatus manifest(Attributor &A) override {
6402     const IRPosition &IRP = getIRPosition();
6403 
6404     // Check if we would improve the existing attributes first.
6405     SmallVector<Attribute, 4> DeducedAttrs;
6406     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6407     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6408           return IRP.hasAttr(Attr.getKindAsEnum(),
6409                              /* IgnoreSubsumingPositions */ true);
6410         }))
6411       return ChangeStatus::UNCHANGED;
6412 
6413     // Clear existing attributes.
6414     IRP.removeAttrs(AttrKinds);
6415     if (isAssumedReadNone())
6416       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6417 
6418     // Use the generic manifest method.
6419     return IRAttribute::manifest(A);
6420   }
6421 
6422   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6423   bool checkForAllAccessesToMemoryKind(
6424       function_ref<bool(const Instruction *, const Value *, AccessKind,
6425                         MemoryLocationsKind)>
6426           Pred,
6427       MemoryLocationsKind RequestedMLK) const override {
6428     if (!isValidState())
6429       return false;
6430 
6431     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6432     if (AssumedMLK == NO_LOCATIONS)
6433       return true;
6434 
6435     unsigned Idx = 0;
6436     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6437          CurMLK *= 2, ++Idx) {
6438       if (CurMLK & RequestedMLK)
6439         continue;
6440 
6441       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6442         for (const AccessInfo &AI : *Accesses)
6443           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6444             return false;
6445     }
6446 
6447     return true;
6448   }
6449 
6450   ChangeStatus indicatePessimisticFixpoint() override {
6451     // If we give up and indicate a pessimistic fixpoint this instruction will
6452     // become an access for all potential access kinds:
6453     // TODO: Add pointers for argmemonly and globals to improve the results of
6454     //       checkForAllAccessesToMemoryKind.
6455     bool Changed = false;
6456     MemoryLocationsKind KnownMLK = getKnown();
6457     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6458     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6459       if (!(CurMLK & KnownMLK))
6460         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6461                                   getAccessKindFromInst(I));
6462     return AAMemoryLocation::indicatePessimisticFixpoint();
6463   }
6464 
6465 protected:
6466   /// Helper struct to tie together an instruction that has a read or write
6467   /// effect with the pointer it accesses (if any).
6468   struct AccessInfo {
6469 
6470     /// The instruction that caused the access.
6471     const Instruction *I;
6472 
6473     /// The base pointer that is accessed, or null if unknown.
6474     const Value *Ptr;
6475 
6476     /// The kind of access (read/write/read+write).
6477     AccessKind Kind;
6478 
6479     bool operator==(const AccessInfo &RHS) const {
6480       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6481     }
6482     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6483       if (LHS.I != RHS.I)
6484         return LHS.I < RHS.I;
6485       if (LHS.Ptr != RHS.Ptr)
6486         return LHS.Ptr < RHS.Ptr;
6487       if (LHS.Kind != RHS.Kind)
6488         return LHS.Kind < RHS.Kind;
6489       return false;
6490     }
6491   };
6492 
6493   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6494   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6495   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6496   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6497 
6498   /// Categorize the pointer arguments of CB that might access memory in
6499   /// AccessedLoc and update the state and access map accordingly.
6500   void
6501   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6502                                      AAMemoryLocation::StateType &AccessedLocs,
6503                                      bool &Changed);
6504 
6505   /// Return the kind(s) of location that may be accessed by \p V.
6506   AAMemoryLocation::MemoryLocationsKind
6507   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6508 
6509   /// Return the access kind as determined by \p I.
6510   AccessKind getAccessKindFromInst(const Instruction *I) {
6511     AccessKind AK = READ_WRITE;
6512     if (I) {
6513       AK = I->mayReadFromMemory() ? READ : NONE;
6514       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6515     }
6516     return AK;
6517   }
6518 
6519   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6520   /// an access of kind \p AK to a \p MLK memory location with the access
6521   /// pointer \p Ptr.
6522   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6523                                  MemoryLocationsKind MLK, const Instruction *I,
6524                                  const Value *Ptr, bool &Changed,
6525                                  AccessKind AK = READ_WRITE) {
6526 
6527     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6528     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6529     if (!Accesses)
6530       Accesses = new (Allocator) AccessSet();
6531     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6532     State.removeAssumedBits(MLK);
6533   }
6534 
6535   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6536   /// arguments, and update the state and access map accordingly.
6537   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6538                           AAMemoryLocation::StateType &State, bool &Changed);
6539 
6540   /// Used to allocate access sets.
6541   BumpPtrAllocator &Allocator;
6542 
6543   /// The set of IR attributes AAMemoryLocation deals with.
6544   static const Attribute::AttrKind AttrKinds[4];
6545 };
6546 
6547 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6548     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6549     Attribute::InaccessibleMemOrArgMemOnly};
6550 
6551 void AAMemoryLocationImpl::categorizePtrValue(
6552     Attributor &A, const Instruction &I, const Value &Ptr,
6553     AAMemoryLocation::StateType &State, bool &Changed) {
6554   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6555                     << Ptr << " ["
6556                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6557 
6558   auto StripGEPCB = [](Value *V) -> Value * {
6559     auto *GEP = dyn_cast<GEPOperator>(V);
6560     while (GEP) {
6561       V = GEP->getPointerOperand();
6562       GEP = dyn_cast<GEPOperator>(V);
6563     }
6564     return V;
6565   };
6566 
6567   auto VisitValueCB = [&](Value &V, const Instruction *,
6568                           AAMemoryLocation::StateType &T,
6569                           bool Stripped) -> bool {
6570     MemoryLocationsKind MLK = NO_LOCATIONS;
6571     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6572     if (isa<UndefValue>(V))
6573       return true;
6574     if (auto *Arg = dyn_cast<Argument>(&V)) {
6575       if (Arg->hasByValAttr())
6576         MLK = NO_LOCAL_MEM;
6577       else
6578         MLK = NO_ARGUMENT_MEM;
6579     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6580       if (GV->hasLocalLinkage())
6581         MLK = NO_GLOBAL_INTERNAL_MEM;
6582       else
6583         MLK = NO_GLOBAL_EXTERNAL_MEM;
6584     } else if (isa<ConstantPointerNull>(V) &&
6585                !NullPointerIsDefined(getAssociatedFunction(),
6586                                      V.getType()->getPointerAddressSpace())) {
6587       return true;
6588     } else if (isa<AllocaInst>(V)) {
6589       MLK = NO_LOCAL_MEM;
6590     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6591       const auto &NoAliasAA =
6592           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6593       if (NoAliasAA.isAssumedNoAlias())
6594         MLK = NO_MALLOCED_MEM;
6595       else
6596         MLK = NO_UNKOWN_MEM;
6597     } else {
6598       MLK = NO_UNKOWN_MEM;
6599     }
6600 
6601     assert(MLK != NO_LOCATIONS && "No location specified!");
6602     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6603                               getAccessKindFromInst(&I));
6604     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6605                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6606                       << "\n");
6607     return true;
6608   };
6609 
6610   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6611           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6612           /* UseValueSimplify */ true,
6613           /* MaxValues */ 32, StripGEPCB)) {
6614     LLVM_DEBUG(
6615         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6616     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6617                               getAccessKindFromInst(&I));
6618   } else {
6619     LLVM_DEBUG(
6620         dbgs()
6621         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6622         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6623   }
6624 }
6625 
6626 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6627     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6628     bool &Changed) {
6629   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6630 
6631     // Skip non-pointer arguments.
6632     const Value *ArgOp = CB.getArgOperand(ArgNo);
6633     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6634       continue;
6635 
6636     // Skip readnone arguments.
6637     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6638     const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6639         *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6640 
6641     if (ArgOpMemLocationAA.isAssumedReadNone())
6642       continue;
6643 
6644     // Categorize potentially accessed pointer arguments as if there was an
6645     // access instruction with them as pointer.
6646     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6647   }
6648 }
6649 
6650 AAMemoryLocation::MemoryLocationsKind
6651 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6652                                                   bool &Changed) {
6653   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6654                     << I << "\n");
6655 
6656   AAMemoryLocation::StateType AccessedLocs;
6657   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6658 
6659   if (auto *CB = dyn_cast<CallBase>(&I)) {
6660 
6661     // First check if we assume any memory is access is visible.
6662     const auto &CBMemLocationAA =
6663         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6664     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6665                       << " [" << CBMemLocationAA << "]\n");
6666 
6667     if (CBMemLocationAA.isAssumedReadNone())
6668       return NO_LOCATIONS;
6669 
6670     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6671       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6672                                 Changed, getAccessKindFromInst(&I));
6673       return AccessedLocs.getAssumed();
6674     }
6675 
6676     uint32_t CBAssumedNotAccessedLocs =
6677         CBMemLocationAA.getAssumedNotAccessedLocation();
6678 
6679     // Set the argmemonly and global bit as we handle them separately below.
6680     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6681         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6682 
6683     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6684       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6685         continue;
6686       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6687                                 getAccessKindFromInst(&I));
6688     }
6689 
6690     // Now handle global memory if it might be accessed. This is slightly tricky
6691     // as NO_GLOBAL_MEM has multiple bits set.
6692     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6693     if (HasGlobalAccesses) {
6694       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6695                             AccessKind Kind, MemoryLocationsKind MLK) {
6696         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6697                                   getAccessKindFromInst(&I));
6698         return true;
6699       };
6700       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6701               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6702         return AccessedLocs.getWorstState();
6703     }
6704 
6705     LLVM_DEBUG(
6706         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6707                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6708 
6709     // Now handle argument memory if it might be accessed.
6710     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6711     if (HasArgAccesses)
6712       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6713 
6714     LLVM_DEBUG(
6715         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6716                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6717 
6718     return AccessedLocs.getAssumed();
6719   }
6720 
6721   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6722     LLVM_DEBUG(
6723         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6724                << I << " [" << *Ptr << "]\n");
6725     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6726     return AccessedLocs.getAssumed();
6727   }
6728 
6729   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6730                     << I << "\n");
6731   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6732                             getAccessKindFromInst(&I));
6733   return AccessedLocs.getAssumed();
6734 }
6735 
6736 /// An AA to represent the memory behavior function attributes.
6737 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6738   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6739       : AAMemoryLocationImpl(IRP, A) {}
6740 
6741   /// See AbstractAttribute::updateImpl(Attributor &A).
6742   virtual ChangeStatus updateImpl(Attributor &A) override {
6743 
6744     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6745         *this, getIRPosition(), /* TrackDependence */ false);
6746     if (MemBehaviorAA.isAssumedReadNone()) {
6747       if (MemBehaviorAA.isKnownReadNone())
6748         return indicateOptimisticFixpoint();
6749       assert(isAssumedReadNone() &&
6750              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6751       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6752       return ChangeStatus::UNCHANGED;
6753     }
6754 
6755     // The current assumed state used to determine a change.
6756     auto AssumedState = getAssumed();
6757     bool Changed = false;
6758 
6759     auto CheckRWInst = [&](Instruction &I) {
6760       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6761       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6762                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6763       removeAssumedBits(inverseLocation(MLK, false, false));
6764       // Stop once only the valid bit set in the *not assumed location*, thus
6765       // once we don't actually exclude any memory locations in the state.
6766       return getAssumedNotAccessedLocation() != VALID_STATE;
6767     };
6768 
6769     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6770       return indicatePessimisticFixpoint();
6771 
6772     Changed |= AssumedState != getAssumed();
6773     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6774   }
6775 
6776   /// See AbstractAttribute::trackStatistics()
6777   void trackStatistics() const override {
6778     if (isAssumedReadNone())
6779       STATS_DECLTRACK_FN_ATTR(readnone)
6780     else if (isAssumedArgMemOnly())
6781       STATS_DECLTRACK_FN_ATTR(argmemonly)
6782     else if (isAssumedInaccessibleMemOnly())
6783       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6784     else if (isAssumedInaccessibleOrArgMemOnly())
6785       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6786   }
6787 };
6788 
6789 /// AAMemoryLocation attribute for call sites.
6790 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6791   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6792       : AAMemoryLocationImpl(IRP, A) {}
6793 
6794   /// See AbstractAttribute::initialize(...).
6795   void initialize(Attributor &A) override {
6796     AAMemoryLocationImpl::initialize(A);
6797     Function *F = getAssociatedFunction();
6798     if (!F || F->isDeclaration())
6799       indicatePessimisticFixpoint();
6800   }
6801 
6802   /// See AbstractAttribute::updateImpl(...).
6803   ChangeStatus updateImpl(Attributor &A) override {
6804     // TODO: Once we have call site specific value information we can provide
6805     //       call site specific liveness liveness information and then it makes
6806     //       sense to specialize attributes for call sites arguments instead of
6807     //       redirecting requests to the callee argument.
6808     Function *F = getAssociatedFunction();
6809     const IRPosition &FnPos = IRPosition::function(*F);
6810     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6811     bool Changed = false;
6812     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6813                           AccessKind Kind, MemoryLocationsKind MLK) {
6814       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6815                                 getAccessKindFromInst(I));
6816       return true;
6817     };
6818     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6819       return indicatePessimisticFixpoint();
6820     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6821   }
6822 
6823   /// See AbstractAttribute::trackStatistics()
6824   void trackStatistics() const override {
6825     if (isAssumedReadNone())
6826       STATS_DECLTRACK_CS_ATTR(readnone)
6827   }
6828 };
6829 
6830 /// ------------------ Value Constant Range Attribute -------------------------
6831 
6832 struct AAValueConstantRangeImpl : AAValueConstantRange {
6833   using StateType = IntegerRangeState;
6834   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6835       : AAValueConstantRange(IRP, A) {}
6836 
6837   /// See AbstractAttribute::getAsStr().
6838   const std::string getAsStr() const override {
6839     std::string Str;
6840     llvm::raw_string_ostream OS(Str);
6841     OS << "range(" << getBitWidth() << ")<";
6842     getKnown().print(OS);
6843     OS << " / ";
6844     getAssumed().print(OS);
6845     OS << ">";
6846     return OS.str();
6847   }
6848 
6849   /// Helper function to get a SCEV expr for the associated value at program
6850   /// point \p I.
6851   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6852     if (!getAnchorScope())
6853       return nullptr;
6854 
6855     ScalarEvolution *SE =
6856         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6857             *getAnchorScope());
6858 
6859     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6860         *getAnchorScope());
6861 
6862     if (!SE || !LI)
6863       return nullptr;
6864 
6865     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6866     if (!I)
6867       return S;
6868 
6869     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6870   }
6871 
6872   /// Helper function to get a range from SCEV for the associated value at
6873   /// program point \p I.
6874   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6875                                          const Instruction *I = nullptr) const {
6876     if (!getAnchorScope())
6877       return getWorstState(getBitWidth());
6878 
6879     ScalarEvolution *SE =
6880         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6881             *getAnchorScope());
6882 
6883     const SCEV *S = getSCEV(A, I);
6884     if (!SE || !S)
6885       return getWorstState(getBitWidth());
6886 
6887     return SE->getUnsignedRange(S);
6888   }
6889 
6890   /// Helper function to get a range from LVI for the associated value at
6891   /// program point \p I.
6892   ConstantRange
6893   getConstantRangeFromLVI(Attributor &A,
6894                           const Instruction *CtxI = nullptr) const {
6895     if (!getAnchorScope())
6896       return getWorstState(getBitWidth());
6897 
6898     LazyValueInfo *LVI =
6899         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6900             *getAnchorScope());
6901 
6902     if (!LVI || !CtxI)
6903       return getWorstState(getBitWidth());
6904     return LVI->getConstantRange(&getAssociatedValue(),
6905                                  const_cast<BasicBlock *>(CtxI->getParent()),
6906                                  const_cast<Instruction *>(CtxI));
6907   }
6908 
6909   /// See AAValueConstantRange::getKnownConstantRange(..).
6910   ConstantRange
6911   getKnownConstantRange(Attributor &A,
6912                         const Instruction *CtxI = nullptr) const override {
6913     if (!CtxI || CtxI == getCtxI())
6914       return getKnown();
6915 
6916     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6917     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6918     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6919   }
6920 
6921   /// See AAValueConstantRange::getAssumedConstantRange(..).
6922   ConstantRange
6923   getAssumedConstantRange(Attributor &A,
6924                           const Instruction *CtxI = nullptr) const override {
6925     // TODO: Make SCEV use Attributor assumption.
6926     //       We may be able to bound a variable range via assumptions in
6927     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6928     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6929 
6930     if (!CtxI || CtxI == getCtxI())
6931       return getAssumed();
6932 
6933     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6934     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6935     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6936   }
6937 
6938   /// See AbstractAttribute::initialize(..).
6939   void initialize(Attributor &A) override {
6940     // Intersect a range given by SCEV.
6941     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6942 
6943     // Intersect a range given by LVI.
6944     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6945   }
6946 
6947   /// Helper function to create MDNode for range metadata.
6948   static MDNode *
6949   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6950                             const ConstantRange &AssumedConstantRange) {
6951     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6952                                   Ty, AssumedConstantRange.getLower())),
6953                               ConstantAsMetadata::get(ConstantInt::get(
6954                                   Ty, AssumedConstantRange.getUpper()))};
6955     return MDNode::get(Ctx, LowAndHigh);
6956   }
6957 
6958   /// Return true if \p Assumed is included in \p KnownRanges.
6959   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6960 
6961     if (Assumed.isFullSet())
6962       return false;
6963 
6964     if (!KnownRanges)
6965       return true;
6966 
6967     // If multiple ranges are annotated in IR, we give up to annotate assumed
6968     // range for now.
6969 
6970     // TODO:  If there exists a known range which containts assumed range, we
6971     // can say assumed range is better.
6972     if (KnownRanges->getNumOperands() > 2)
6973       return false;
6974 
6975     ConstantInt *Lower =
6976         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6977     ConstantInt *Upper =
6978         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6979 
6980     ConstantRange Known(Lower->getValue(), Upper->getValue());
6981     return Known.contains(Assumed) && Known != Assumed;
6982   }
6983 
6984   /// Helper function to set range metadata.
6985   static bool
6986   setRangeMetadataIfisBetterRange(Instruction *I,
6987                                   const ConstantRange &AssumedConstantRange) {
6988     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6989     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6990       if (!AssumedConstantRange.isEmptySet()) {
6991         I->setMetadata(LLVMContext::MD_range,
6992                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6993                                                  AssumedConstantRange));
6994         return true;
6995       }
6996     }
6997     return false;
6998   }
6999 
7000   /// See AbstractAttribute::manifest()
7001   ChangeStatus manifest(Attributor &A) override {
7002     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7003     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7004     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7005 
7006     auto &V = getAssociatedValue();
7007     if (!AssumedConstantRange.isEmptySet() &&
7008         !AssumedConstantRange.isSingleElement()) {
7009       if (Instruction *I = dyn_cast<Instruction>(&V))
7010         if (isa<CallInst>(I) || isa<LoadInst>(I))
7011           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7012             Changed = ChangeStatus::CHANGED;
7013     }
7014 
7015     return Changed;
7016   }
7017 };
7018 
7019 struct AAValueConstantRangeArgument final
7020     : AAArgumentFromCallSiteArguments<
7021           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
7022   using Base = AAArgumentFromCallSiteArguments<
7023       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
7024   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7025       : Base(IRP, A) {}
7026 
7027   /// See AbstractAttribute::initialize(..).
7028   void initialize(Attributor &A) override {
7029     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7030       indicatePessimisticFixpoint();
7031     } else {
7032       Base::initialize(A);
7033     }
7034   }
7035 
7036   /// See AbstractAttribute::trackStatistics()
7037   void trackStatistics() const override {
7038     STATS_DECLTRACK_ARG_ATTR(value_range)
7039   }
7040 };
7041 
7042 struct AAValueConstantRangeReturned
7043     : AAReturnedFromReturnedValues<AAValueConstantRange,
7044                                    AAValueConstantRangeImpl> {
7045   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
7046                                             AAValueConstantRangeImpl>;
7047   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7048       : Base(IRP, A) {}
7049 
7050   /// See AbstractAttribute::initialize(...).
7051   void initialize(Attributor &A) override {}
7052 
7053   /// See AbstractAttribute::trackStatistics()
7054   void trackStatistics() const override {
7055     STATS_DECLTRACK_FNRET_ATTR(value_range)
7056   }
7057 };
7058 
7059 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7060   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7061       : AAValueConstantRangeImpl(IRP, A) {}
7062 
7063   /// See AbstractAttribute::initialize(...).
7064   void initialize(Attributor &A) override {
7065     AAValueConstantRangeImpl::initialize(A);
7066     Value &V = getAssociatedValue();
7067 
7068     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7069       unionAssumed(ConstantRange(C->getValue()));
7070       indicateOptimisticFixpoint();
7071       return;
7072     }
7073 
7074     if (isa<UndefValue>(&V)) {
7075       // Collapse the undef state to 0.
7076       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7077       indicateOptimisticFixpoint();
7078       return;
7079     }
7080 
7081     if (isa<CallBase>(&V))
7082       return;
7083 
7084     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7085       return;
7086     // If it is a load instruction with range metadata, use it.
7087     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7088       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7089         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7090         return;
7091       }
7092 
7093     // We can work with PHI and select instruction as we traverse their operands
7094     // during update.
7095     if (isa<SelectInst>(V) || isa<PHINode>(V))
7096       return;
7097 
7098     // Otherwise we give up.
7099     indicatePessimisticFixpoint();
7100 
7101     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7102                       << getAssociatedValue() << "\n");
7103   }
7104 
7105   bool calculateBinaryOperator(
7106       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7107       const Instruction *CtxI,
7108       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7109     Value *LHS = BinOp->getOperand(0);
7110     Value *RHS = BinOp->getOperand(1);
7111     // TODO: Allow non integers as well.
7112     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7113       return false;
7114 
7115     auto &LHSAA =
7116         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
7117     QuerriedAAs.push_back(&LHSAA);
7118     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7119 
7120     auto &RHSAA =
7121         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
7122     QuerriedAAs.push_back(&RHSAA);
7123     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7124 
7125     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7126 
7127     T.unionAssumed(AssumedRange);
7128 
7129     // TODO: Track a known state too.
7130 
7131     return T.isValidState();
7132   }
7133 
7134   bool calculateCastInst(
7135       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7136       const Instruction *CtxI,
7137       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7138     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7139     // TODO: Allow non integers as well.
7140     Value &OpV = *CastI->getOperand(0);
7141     if (!OpV.getType()->isIntegerTy())
7142       return false;
7143 
7144     auto &OpAA =
7145         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
7146     QuerriedAAs.push_back(&OpAA);
7147     T.unionAssumed(
7148         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7149     return T.isValidState();
7150   }
7151 
7152   bool
7153   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7154                    const Instruction *CtxI,
7155                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7156     Value *LHS = CmpI->getOperand(0);
7157     Value *RHS = CmpI->getOperand(1);
7158     // TODO: Allow non integers as well.
7159     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7160       return false;
7161 
7162     auto &LHSAA =
7163         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
7164     QuerriedAAs.push_back(&LHSAA);
7165     auto &RHSAA =
7166         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
7167     QuerriedAAs.push_back(&RHSAA);
7168 
7169     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7170     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7171 
7172     // If one of them is empty set, we can't decide.
7173     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7174       return true;
7175 
7176     bool MustTrue = false, MustFalse = false;
7177 
7178     auto AllowedRegion =
7179         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7180 
7181     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
7182         CmpI->getPredicate(), RHSAARange);
7183 
7184     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7185       MustFalse = true;
7186 
7187     if (SatisfyingRegion.contains(LHSAARange))
7188       MustTrue = true;
7189 
7190     assert((!MustTrue || !MustFalse) &&
7191            "Either MustTrue or MustFalse should be false!");
7192 
7193     if (MustTrue)
7194       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7195     else if (MustFalse)
7196       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7197     else
7198       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7199 
7200     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7201                       << " " << RHSAA << "\n");
7202 
7203     // TODO: Track a known state too.
7204     return T.isValidState();
7205   }
7206 
7207   /// See AbstractAttribute::updateImpl(...).
7208   ChangeStatus updateImpl(Attributor &A) override {
7209     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7210                             IntegerRangeState &T, bool Stripped) -> bool {
7211       Instruction *I = dyn_cast<Instruction>(&V);
7212       if (!I || isa<CallBase>(I)) {
7213 
7214         // If the value is not instruction, we query AA to Attributor.
7215         const auto &AA =
7216             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
7217 
7218         // Clamp operator is not used to utilize a program point CtxI.
7219         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7220 
7221         return T.isValidState();
7222       }
7223 
7224       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7225       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7226         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7227           return false;
7228       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7229         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7230           return false;
7231       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7232         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7233           return false;
7234       } else {
7235         // Give up with other instructions.
7236         // TODO: Add other instructions
7237 
7238         T.indicatePessimisticFixpoint();
7239         return false;
7240       }
7241 
7242       // Catch circular reasoning in a pessimistic way for now.
7243       // TODO: Check how the range evolves and if we stripped anything, see also
7244       //       AADereferenceable or AAAlign for similar situations.
7245       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7246         if (QueriedAA != this)
7247           continue;
7248         // If we are in a stady state we do not need to worry.
7249         if (T.getAssumed() == getState().getAssumed())
7250           continue;
7251         T.indicatePessimisticFixpoint();
7252       }
7253 
7254       return T.isValidState();
7255     };
7256 
7257     IntegerRangeState T(getBitWidth());
7258 
7259     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7260             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7261             /* UseValueSimplify */ false))
7262       return indicatePessimisticFixpoint();
7263 
7264     return clampStateAndIndicateChange(getState(), T);
7265   }
7266 
7267   /// See AbstractAttribute::trackStatistics()
7268   void trackStatistics() const override {
7269     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7270   }
7271 };
7272 
7273 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7274   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7275       : AAValueConstantRangeImpl(IRP, A) {}
7276 
7277   /// See AbstractAttribute::initialize(...).
7278   ChangeStatus updateImpl(Attributor &A) override {
7279     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7280                      "not be called");
7281   }
7282 
7283   /// See AbstractAttribute::trackStatistics()
7284   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7285 };
7286 
7287 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7288   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7289       : AAValueConstantRangeFunction(IRP, A) {}
7290 
7291   /// See AbstractAttribute::trackStatistics()
7292   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7293 };
7294 
7295 struct AAValueConstantRangeCallSiteReturned
7296     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7297                                      AAValueConstantRangeImpl> {
7298   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7299       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7300                                        AAValueConstantRangeImpl>(IRP, A) {}
7301 
7302   /// See AbstractAttribute::initialize(...).
7303   void initialize(Attributor &A) override {
7304     // If it is a load instruction with range metadata, use the metadata.
7305     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7306       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7307         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7308 
7309     AAValueConstantRangeImpl::initialize(A);
7310   }
7311 
7312   /// See AbstractAttribute::trackStatistics()
7313   void trackStatistics() const override {
7314     STATS_DECLTRACK_CSRET_ATTR(value_range)
7315   }
7316 };
7317 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7318   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7319       : AAValueConstantRangeFloating(IRP, A) {}
7320 
7321   /// See AbstractAttribute::trackStatistics()
7322   void trackStatistics() const override {
7323     STATS_DECLTRACK_CSARG_ATTR(value_range)
7324   }
7325 };
7326 
7327 /// ------------------ Potential Values Attribute -------------------------
7328 
7329 struct AAPotentialValuesImpl : AAPotentialValues {
7330   using StateType = PotentialConstantIntValuesState;
7331 
7332   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7333       : AAPotentialValues(IRP, A) {}
7334 
7335   /// See AbstractAttribute::getAsStr().
7336   const std::string getAsStr() const override {
7337     std::string Str;
7338     llvm::raw_string_ostream OS(Str);
7339     OS << getState();
7340     return OS.str();
7341   }
7342 
7343   /// See AbstractAttribute::updateImpl(...).
7344   ChangeStatus updateImpl(Attributor &A) override {
7345     return indicatePessimisticFixpoint();
7346   }
7347 };
7348 
7349 struct AAPotentialValuesArgument final
7350     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7351                                       PotentialConstantIntValuesState> {
7352   using Base =
7353       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7354                                       PotentialConstantIntValuesState>;
7355   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7356       : Base(IRP, A) {}
7357 
7358   /// See AbstractAttribute::initialize(..).
7359   void initialize(Attributor &A) override {
7360     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7361       indicatePessimisticFixpoint();
7362     } else {
7363       Base::initialize(A);
7364     }
7365   }
7366 
7367   /// See AbstractAttribute::trackStatistics()
7368   void trackStatistics() const override {
7369     STATS_DECLTRACK_ARG_ATTR(potential_values)
7370   }
7371 };
7372 
7373 struct AAPotentialValuesReturned
7374     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7375   using Base =
7376       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7377   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7378       : Base(IRP, A) {}
7379 
7380   /// See AbstractAttribute::trackStatistics()
7381   void trackStatistics() const override {
7382     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7383   }
7384 };
7385 
7386 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7387   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7388       : AAPotentialValuesImpl(IRP, A) {}
7389 
7390   /// See AbstractAttribute::initialize(..).
7391   void initialize(Attributor &A) override {
7392     Value &V = getAssociatedValue();
7393 
7394     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7395       unionAssumed(C->getValue());
7396       indicateOptimisticFixpoint();
7397       return;
7398     }
7399 
7400     if (isa<UndefValue>(&V)) {
7401       unionAssumedWithUndef();
7402       indicateOptimisticFixpoint();
7403       return;
7404     }
7405 
7406     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7407       return;
7408 
7409     if (isa<SelectInst>(V) || isa<PHINode>(V))
7410       return;
7411 
7412     indicatePessimisticFixpoint();
7413 
7414     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7415                       << getAssociatedValue() << "\n");
7416   }
7417 
7418   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7419                                 const APInt &RHS) {
7420     ICmpInst::Predicate Pred = ICI->getPredicate();
7421     switch (Pred) {
7422     case ICmpInst::ICMP_UGT:
7423       return LHS.ugt(RHS);
7424     case ICmpInst::ICMP_SGT:
7425       return LHS.sgt(RHS);
7426     case ICmpInst::ICMP_EQ:
7427       return LHS.eq(RHS);
7428     case ICmpInst::ICMP_UGE:
7429       return LHS.uge(RHS);
7430     case ICmpInst::ICMP_SGE:
7431       return LHS.sge(RHS);
7432     case ICmpInst::ICMP_ULT:
7433       return LHS.ult(RHS);
7434     case ICmpInst::ICMP_SLT:
7435       return LHS.slt(RHS);
7436     case ICmpInst::ICMP_NE:
7437       return LHS.ne(RHS);
7438     case ICmpInst::ICMP_ULE:
7439       return LHS.ule(RHS);
7440     case ICmpInst::ICMP_SLE:
7441       return LHS.sle(RHS);
7442     default:
7443       llvm_unreachable("Invalid ICmp predicate!");
7444     }
7445   }
7446 
7447   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7448                                  uint32_t ResultBitWidth) {
7449     Instruction::CastOps CastOp = CI->getOpcode();
7450     switch (CastOp) {
7451     default:
7452       llvm_unreachable("unsupported or not integer cast");
7453     case Instruction::Trunc:
7454       return Src.trunc(ResultBitWidth);
7455     case Instruction::SExt:
7456       return Src.sext(ResultBitWidth);
7457     case Instruction::ZExt:
7458       return Src.zext(ResultBitWidth);
7459     case Instruction::BitCast:
7460       return Src;
7461     }
7462   }
7463 
7464   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7465                                        const APInt &LHS, const APInt &RHS,
7466                                        bool &SkipOperation, bool &Unsupported) {
7467     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7468     // Unsupported is set to true when the binary operator is not supported.
7469     // SkipOperation is set to true when UB occur with the given operand pair
7470     // (LHS, RHS).
7471     // TODO: we should look at nsw and nuw keywords to handle operations
7472     //       that create poison or undef value.
7473     switch (BinOpcode) {
7474     default:
7475       Unsupported = true;
7476       return LHS;
7477     case Instruction::Add:
7478       return LHS + RHS;
7479     case Instruction::Sub:
7480       return LHS - RHS;
7481     case Instruction::Mul:
7482       return LHS * RHS;
7483     case Instruction::UDiv:
7484       if (RHS.isNullValue()) {
7485         SkipOperation = true;
7486         return LHS;
7487       }
7488       return LHS.udiv(RHS);
7489     case Instruction::SDiv:
7490       if (RHS.isNullValue()) {
7491         SkipOperation = true;
7492         return LHS;
7493       }
7494       return LHS.sdiv(RHS);
7495     case Instruction::URem:
7496       if (RHS.isNullValue()) {
7497         SkipOperation = true;
7498         return LHS;
7499       }
7500       return LHS.urem(RHS);
7501     case Instruction::SRem:
7502       if (RHS.isNullValue()) {
7503         SkipOperation = true;
7504         return LHS;
7505       }
7506       return LHS.srem(RHS);
7507     case Instruction::Shl:
7508       return LHS.shl(RHS);
7509     case Instruction::LShr:
7510       return LHS.lshr(RHS);
7511     case Instruction::AShr:
7512       return LHS.ashr(RHS);
7513     case Instruction::And:
7514       return LHS & RHS;
7515     case Instruction::Or:
7516       return LHS | RHS;
7517     case Instruction::Xor:
7518       return LHS ^ RHS;
7519     }
7520   }
7521 
7522   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7523                                            const APInt &LHS, const APInt &RHS) {
7524     bool SkipOperation = false;
7525     bool Unsupported = false;
7526     APInt Result =
7527         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7528     if (Unsupported)
7529       return false;
7530     // If SkipOperation is true, we can ignore this operand pair (L, R).
7531     if (!SkipOperation)
7532       unionAssumed(Result);
7533     return isValidState();
7534   }
7535 
7536   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7537     auto AssumedBefore = getAssumed();
7538     Value *LHS = ICI->getOperand(0);
7539     Value *RHS = ICI->getOperand(1);
7540     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7541       return indicatePessimisticFixpoint();
7542 
7543     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7544     if (!LHSAA.isValidState())
7545       return indicatePessimisticFixpoint();
7546 
7547     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7548     if (!RHSAA.isValidState())
7549       return indicatePessimisticFixpoint();
7550 
7551     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7552     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7553 
7554     // TODO: make use of undef flag to limit potential values aggressively.
7555     bool MaybeTrue = false, MaybeFalse = false;
7556     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7557     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7558       // The result of any comparison between undefs can be soundly replaced
7559       // with undef.
7560       unionAssumedWithUndef();
7561     } else if (LHSAA.undefIsContained()) {
7562       bool MaybeTrue = false, MaybeFalse = false;
7563       for (const APInt &R : RHSAAPVS) {
7564         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7565         MaybeTrue |= CmpResult;
7566         MaybeFalse |= !CmpResult;
7567         if (MaybeTrue & MaybeFalse)
7568           return indicatePessimisticFixpoint();
7569       }
7570     } else if (RHSAA.undefIsContained()) {
7571       for (const APInt &L : LHSAAPVS) {
7572         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7573         MaybeTrue |= CmpResult;
7574         MaybeFalse |= !CmpResult;
7575         if (MaybeTrue & MaybeFalse)
7576           return indicatePessimisticFixpoint();
7577       }
7578     } else {
7579       for (const APInt &L : LHSAAPVS) {
7580         for (const APInt &R : RHSAAPVS) {
7581           bool CmpResult = calculateICmpInst(ICI, L, R);
7582           MaybeTrue |= CmpResult;
7583           MaybeFalse |= !CmpResult;
7584           if (MaybeTrue & MaybeFalse)
7585             return indicatePessimisticFixpoint();
7586         }
7587       }
7588     }
7589     if (MaybeTrue)
7590       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7591     if (MaybeFalse)
7592       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7593     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7594                                          : ChangeStatus::CHANGED;
7595   }
7596 
7597   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7598     auto AssumedBefore = getAssumed();
7599     Value *LHS = SI->getTrueValue();
7600     Value *RHS = SI->getFalseValue();
7601     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7602       return indicatePessimisticFixpoint();
7603 
7604     // TODO: Use assumed simplified condition value
7605     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7606     if (!LHSAA.isValidState())
7607       return indicatePessimisticFixpoint();
7608 
7609     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7610     if (!RHSAA.isValidState())
7611       return indicatePessimisticFixpoint();
7612 
7613     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7614       // select i1 *, undef , undef => undef
7615       unionAssumedWithUndef();
7616     else {
7617       unionAssumed(LHSAA);
7618       unionAssumed(RHSAA);
7619     }
7620     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7621                                          : ChangeStatus::CHANGED;
7622   }
7623 
7624   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7625     auto AssumedBefore = getAssumed();
7626     if (!CI->isIntegerCast())
7627       return indicatePessimisticFixpoint();
7628     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7629     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7630     Value *Src = CI->getOperand(0);
7631     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src));
7632     if (!SrcAA.isValidState())
7633       return indicatePessimisticFixpoint();
7634     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7635     if (SrcAA.undefIsContained())
7636       unionAssumedWithUndef();
7637     else {
7638       for (const APInt &S : SrcAAPVS) {
7639         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7640         unionAssumed(T);
7641       }
7642     }
7643     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7644                                          : ChangeStatus::CHANGED;
7645   }
7646 
7647   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7648     auto AssumedBefore = getAssumed();
7649     Value *LHS = BinOp->getOperand(0);
7650     Value *RHS = BinOp->getOperand(1);
7651     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7652       return indicatePessimisticFixpoint();
7653 
7654     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7655     if (!LHSAA.isValidState())
7656       return indicatePessimisticFixpoint();
7657 
7658     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7659     if (!RHSAA.isValidState())
7660       return indicatePessimisticFixpoint();
7661 
7662     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7663     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7664     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7665 
7666     // TODO: make use of undef flag to limit potential values aggressively.
7667     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7668       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7669         return indicatePessimisticFixpoint();
7670     } else if (LHSAA.undefIsContained()) {
7671       for (const APInt &R : RHSAAPVS) {
7672         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7673           return indicatePessimisticFixpoint();
7674       }
7675     } else if (RHSAA.undefIsContained()) {
7676       for (const APInt &L : LHSAAPVS) {
7677         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7678           return indicatePessimisticFixpoint();
7679       }
7680     } else {
7681       for (const APInt &L : LHSAAPVS) {
7682         for (const APInt &R : RHSAAPVS) {
7683           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7684             return indicatePessimisticFixpoint();
7685         }
7686       }
7687     }
7688     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7689                                          : ChangeStatus::CHANGED;
7690   }
7691 
7692   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7693     auto AssumedBefore = getAssumed();
7694     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7695       Value *IncomingValue = PHI->getIncomingValue(u);
7696       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7697           *this, IRPosition::value(*IncomingValue));
7698       if (!PotentialValuesAA.isValidState())
7699         return indicatePessimisticFixpoint();
7700       if (PotentialValuesAA.undefIsContained())
7701         unionAssumedWithUndef();
7702       else
7703         unionAssumed(PotentialValuesAA.getAssumed());
7704     }
7705     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7706                                          : ChangeStatus::CHANGED;
7707   }
7708 
7709   /// See AbstractAttribute::updateImpl(...).
7710   ChangeStatus updateImpl(Attributor &A) override {
7711     Value &V = getAssociatedValue();
7712     Instruction *I = dyn_cast<Instruction>(&V);
7713 
7714     if (auto *ICI = dyn_cast<ICmpInst>(I))
7715       return updateWithICmpInst(A, ICI);
7716 
7717     if (auto *SI = dyn_cast<SelectInst>(I))
7718       return updateWithSelectInst(A, SI);
7719 
7720     if (auto *CI = dyn_cast<CastInst>(I))
7721       return updateWithCastInst(A, CI);
7722 
7723     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7724       return updateWithBinaryOperator(A, BinOp);
7725 
7726     if (auto *PHI = dyn_cast<PHINode>(I))
7727       return updateWithPHINode(A, PHI);
7728 
7729     return indicatePessimisticFixpoint();
7730   }
7731 
7732   /// See AbstractAttribute::trackStatistics()
7733   void trackStatistics() const override {
7734     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7735   }
7736 };
7737 
7738 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7739   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7740       : AAPotentialValuesImpl(IRP, A) {}
7741 
7742   /// See AbstractAttribute::initialize(...).
7743   ChangeStatus updateImpl(Attributor &A) override {
7744     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7745                      "not be called");
7746   }
7747 
7748   /// See AbstractAttribute::trackStatistics()
7749   void trackStatistics() const override {
7750     STATS_DECLTRACK_FN_ATTR(potential_values)
7751   }
7752 };
7753 
7754 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7755   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7756       : AAPotentialValuesFunction(IRP, A) {}
7757 
7758   /// See AbstractAttribute::trackStatistics()
7759   void trackStatistics() const override {
7760     STATS_DECLTRACK_CS_ATTR(potential_values)
7761   }
7762 };
7763 
7764 struct AAPotentialValuesCallSiteReturned
7765     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7766   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7767       : AACallSiteReturnedFromReturned<AAPotentialValues,
7768                                        AAPotentialValuesImpl>(IRP, A) {}
7769 
7770   /// See AbstractAttribute::trackStatistics()
7771   void trackStatistics() const override {
7772     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7773   }
7774 };
7775 
7776 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7777   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7778       : AAPotentialValuesFloating(IRP, A) {}
7779 
7780   /// See AbstractAttribute::initialize(..).
7781   void initialize(Attributor &A) override {
7782     Value &V = getAssociatedValue();
7783 
7784     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7785       unionAssumed(C->getValue());
7786       indicateOptimisticFixpoint();
7787       return;
7788     }
7789 
7790     if (isa<UndefValue>(&V)) {
7791       unionAssumedWithUndef();
7792       indicateOptimisticFixpoint();
7793       return;
7794     }
7795   }
7796 
7797   /// See AbstractAttribute::updateImpl(...).
7798   ChangeStatus updateImpl(Attributor &A) override {
7799     Value &V = getAssociatedValue();
7800     auto AssumedBefore = getAssumed();
7801     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V));
7802     const auto &S = AA.getAssumed();
7803     unionAssumed(S);
7804     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7805                                          : ChangeStatus::CHANGED;
7806   }
7807 
7808   /// See AbstractAttribute::trackStatistics()
7809   void trackStatistics() const override {
7810     STATS_DECLTRACK_CSARG_ATTR(potential_values)
7811   }
7812 };
7813 
7814 /// ------------------------ NoUndef Attribute ---------------------------------
7815 struct AANoUndefImpl : AANoUndef {
7816   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
7817 
7818   /// See AbstractAttribute::initialize(...).
7819   void initialize(Attributor &A) override {
7820     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
7821       indicateOptimisticFixpoint();
7822       return;
7823     }
7824     Value &V = getAssociatedValue();
7825     if (isa<UndefValue>(V))
7826       indicatePessimisticFixpoint();
7827     else if (isa<FreezeInst>(V))
7828       indicateOptimisticFixpoint();
7829     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
7830              isGuaranteedNotToBeUndefOrPoison(&V))
7831       indicateOptimisticFixpoint();
7832     else
7833       AANoUndef::initialize(A);
7834   }
7835 
7836   /// See followUsesInMBEC
7837   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
7838                        AANoUndef::StateType &State) {
7839     const Value *UseV = U->get();
7840     const DominatorTree *DT = nullptr;
7841     if (Function *F = getAnchorScope())
7842       DT = A.getInfoCache().getAnalysisResultForFunction<DominatorTreeAnalysis>(
7843           *F);
7844     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, I, DT));
7845     bool TrackUse = false;
7846     // Track use for instructions which must produce undef or poison bits when
7847     // at least one operand contains such bits.
7848     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
7849       TrackUse = true;
7850     return TrackUse;
7851   }
7852 
7853   /// See AbstractAttribute::getAsStr().
7854   const std::string getAsStr() const override {
7855     return getAssumed() ? "noundef" : "may-undef-or-poison";
7856   }
7857 
7858   ChangeStatus manifest(Attributor &A) override {
7859     // We don't manifest noundef attribute for dead positions because the
7860     // associated values with dead positions would be replaced with undef
7861     // values.
7862     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
7863       return ChangeStatus::UNCHANGED;
7864     // A position whose simplified value does not have any value is
7865     // considered to be dead. We don't manifest noundef in such positions for
7866     // the same reason above.
7867     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
7868         *this, getIRPosition(), /* TrackDependence */ false);
7869     if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
7870       return ChangeStatus::UNCHANGED;
7871     return AANoUndef::manifest(A);
7872   }
7873 };
7874 
7875 struct AANoUndefFloating : public AANoUndefImpl {
7876   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
7877       : AANoUndefImpl(IRP, A) {}
7878 
7879   /// See AbstractAttribute::initialize(...).
7880   void initialize(Attributor &A) override {
7881     AANoUndefImpl::initialize(A);
7882     if (!getState().isAtFixpoint())
7883       if (Instruction *CtxI = getCtxI())
7884         followUsesInMBEC(*this, A, getState(), *CtxI);
7885   }
7886 
7887   /// See AbstractAttribute::updateImpl(...).
7888   ChangeStatus updateImpl(Attributor &A) override {
7889     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7890                             AANoUndef::StateType &T, bool Stripped) -> bool {
7891       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V));
7892       if (!Stripped && this == &AA) {
7893         T.indicatePessimisticFixpoint();
7894       } else {
7895         const AANoUndef::StateType &S =
7896             static_cast<const AANoUndef::StateType &>(AA.getState());
7897         T ^= S;
7898       }
7899       return T.isValidState();
7900     };
7901 
7902     StateType T;
7903     if (!genericValueTraversal<AANoUndef, StateType>(
7904             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
7905       return indicatePessimisticFixpoint();
7906 
7907     return clampStateAndIndicateChange(getState(), T);
7908   }
7909 
7910   /// See AbstractAttribute::trackStatistics()
7911   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
7912 };
7913 
7914 struct AANoUndefReturned final
7915     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
7916   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
7917       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
7918 
7919   /// See AbstractAttribute::trackStatistics()
7920   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
7921 };
7922 
7923 struct AANoUndefArgument final
7924     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
7925   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
7926       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
7927 
7928   /// See AbstractAttribute::trackStatistics()
7929   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
7930 };
7931 
7932 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
7933   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
7934       : AANoUndefFloating(IRP, A) {}
7935 
7936   /// See AbstractAttribute::trackStatistics()
7937   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
7938 };
7939 
7940 struct AANoUndefCallSiteReturned final
7941     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
7942   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
7943       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
7944 
7945   /// See AbstractAttribute::trackStatistics()
7946   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
7947 };
7948 } // namespace
7949 
7950 const char AAReturnedValues::ID = 0;
7951 const char AANoUnwind::ID = 0;
7952 const char AANoSync::ID = 0;
7953 const char AANoFree::ID = 0;
7954 const char AANonNull::ID = 0;
7955 const char AANoRecurse::ID = 0;
7956 const char AAWillReturn::ID = 0;
7957 const char AAUndefinedBehavior::ID = 0;
7958 const char AANoAlias::ID = 0;
7959 const char AAReachability::ID = 0;
7960 const char AANoReturn::ID = 0;
7961 const char AAIsDead::ID = 0;
7962 const char AADereferenceable::ID = 0;
7963 const char AAAlign::ID = 0;
7964 const char AANoCapture::ID = 0;
7965 const char AAValueSimplify::ID = 0;
7966 const char AAHeapToStack::ID = 0;
7967 const char AAPrivatizablePtr::ID = 0;
7968 const char AAMemoryBehavior::ID = 0;
7969 const char AAMemoryLocation::ID = 0;
7970 const char AAValueConstantRange::ID = 0;
7971 const char AAPotentialValues::ID = 0;
7972 const char AANoUndef::ID = 0;
7973 
7974 // Macro magic to create the static generator function for attributes that
7975 // follow the naming scheme.
7976 
7977 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
7978   case IRPosition::PK:                                                         \
7979     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
7980 
7981 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
7982   case IRPosition::PK:                                                         \
7983     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
7984     ++NumAAs;                                                                  \
7985     break;
7986 
7987 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
7988   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7989     CLASS *AA = nullptr;                                                       \
7990     switch (IRP.getPositionKind()) {                                           \
7991       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7992       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7993       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7994       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7995       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7996       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7997       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7998       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7999     }                                                                          \
8000     return *AA;                                                                \
8001   }
8002 
8003 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
8004   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8005     CLASS *AA = nullptr;                                                       \
8006     switch (IRP.getPositionKind()) {                                           \
8007       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8008       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
8009       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8010       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8011       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8012       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8013       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8014       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8015     }                                                                          \
8016     return *AA;                                                                \
8017   }
8018 
8019 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
8020   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8021     CLASS *AA = nullptr;                                                       \
8022     switch (IRP.getPositionKind()) {                                           \
8023       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8024       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8025       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8026       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8027       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8028       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8029       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8030       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8031     }                                                                          \
8032     return *AA;                                                                \
8033   }
8034 
8035 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
8036   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8037     CLASS *AA = nullptr;                                                       \
8038     switch (IRP.getPositionKind()) {                                           \
8039       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8040       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8041       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8042       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8043       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8044       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8045       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8046       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8047     }                                                                          \
8048     return *AA;                                                                \
8049   }
8050 
8051 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
8052   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8053     CLASS *AA = nullptr;                                                       \
8054     switch (IRP.getPositionKind()) {                                           \
8055       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8056       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8057       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8058       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8059       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8060       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8061       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8062       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8063     }                                                                          \
8064     return *AA;                                                                \
8065   }
8066 
8067 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
8068 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
8069 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
8070 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
8071 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
8072 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
8073 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
8074 
8075 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
8076 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
8077 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
8078 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
8079 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
8080 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
8081 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
8082 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
8083 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
8084 
8085 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
8086 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
8087 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
8088 
8089 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
8090 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
8091 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
8092 
8093 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
8094 
8095 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
8096 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
8097 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
8098 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
8099 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
8100 #undef SWITCH_PK_CREATE
8101 #undef SWITCH_PK_INV
8102