1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AssumeBundleQueries.h"
20 #include "llvm/Analysis/AssumptionCache.h"
21 #include "llvm/Analysis/CaptureTracking.h"
22 #include "llvm/Analysis/LazyValueInfo.h"
23 #include "llvm/Analysis/MemoryBuiltins.h"
24 #include "llvm/Analysis/ScalarEvolution.h"
25 #include "llvm/Analysis/TargetTransformInfo.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/IR/IRBuilder.h"
28 #include "llvm/IR/IntrinsicInst.h"
29 #include "llvm/IR/NoFolder.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 
34 #include <cassert>
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "attributor"
39 
40 static cl::opt<bool> ManifestInternal(
41     "attributor-manifest-internal", cl::Hidden,
42     cl::desc("Manifest Attributor internal string attributes."),
43     cl::init(false));
44 
45 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
46                                        cl::Hidden);
47 
48 template <>
49 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
50 
51 static cl::opt<unsigned, true> MaxPotentialValues(
52     "attributor-max-potential-values", cl::Hidden,
53     cl::desc("Maximum number of potential values to be "
54              "tracked for each position."),
55     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
56     cl::init(7));
57 
58 STATISTIC(NumAAs, "Number of abstract attributes created");
59 
60 // Some helper macros to deal with statistics tracking.
61 //
62 // Usage:
63 // For simple IR attribute tracking overload trackStatistics in the abstract
64 // attribute and choose the right STATS_DECLTRACK_********* macro,
65 // e.g.,:
66 //  void trackStatistics() const override {
67 //    STATS_DECLTRACK_ARG_ATTR(returned)
68 //  }
69 // If there is a single "increment" side one can use the macro
70 // STATS_DECLTRACK with a custom message. If there are multiple increment
71 // sides, STATS_DECL and STATS_TRACK can also be used separately.
72 //
73 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
74   ("Number of " #TYPE " marked '" #NAME "'")
75 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
76 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
77 #define STATS_DECL(NAME, TYPE, MSG)                                            \
78   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
79 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
80 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
81   {                                                                            \
82     STATS_DECL(NAME, TYPE, MSG)                                                \
83     STATS_TRACK(NAME, TYPE)                                                    \
84   }
85 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
86   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
87 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
88   STATS_DECLTRACK(NAME, CSArguments,                                           \
89                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
90 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
91   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
92 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
94 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
95   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
96                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
97 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, CSReturn,                                              \
99                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
100 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
101   STATS_DECLTRACK(NAME, Floating,                                              \
102                   ("Number of floating values known to be '" #NAME "'"))
103 
104 // Specialization of the operator<< for abstract attributes subclasses. This
105 // disambiguates situations where multiple operators are applicable.
106 namespace llvm {
107 #define PIPE_OPERATOR(CLASS)                                                   \
108   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
109     return OS << static_cast<const AbstractAttribute &>(AA);                   \
110   }
111 
112 PIPE_OPERATOR(AAIsDead)
113 PIPE_OPERATOR(AANoUnwind)
114 PIPE_OPERATOR(AANoSync)
115 PIPE_OPERATOR(AANoRecurse)
116 PIPE_OPERATOR(AAWillReturn)
117 PIPE_OPERATOR(AANoReturn)
118 PIPE_OPERATOR(AAReturnedValues)
119 PIPE_OPERATOR(AANonNull)
120 PIPE_OPERATOR(AANoAlias)
121 PIPE_OPERATOR(AADereferenceable)
122 PIPE_OPERATOR(AAAlign)
123 PIPE_OPERATOR(AANoCapture)
124 PIPE_OPERATOR(AAValueSimplify)
125 PIPE_OPERATOR(AANoFree)
126 PIPE_OPERATOR(AAHeapToStack)
127 PIPE_OPERATOR(AAReachability)
128 PIPE_OPERATOR(AAMemoryBehavior)
129 PIPE_OPERATOR(AAMemoryLocation)
130 PIPE_OPERATOR(AAValueConstantRange)
131 PIPE_OPERATOR(AAPrivatizablePtr)
132 PIPE_OPERATOR(AAUndefinedBehavior)
133 PIPE_OPERATOR(AAPotentialValues)
134 
135 #undef PIPE_OPERATOR
136 } // namespace llvm
137 
138 namespace {
139 
140 static Optional<ConstantInt *>
141 getAssumedConstantInt(Attributor &A, const Value &V,
142                       const AbstractAttribute &AA,
143                       bool &UsedAssumedInformation) {
144   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
145   if (C.hasValue())
146     return dyn_cast_or_null<ConstantInt>(C.getValue());
147   return llvm::None;
148 }
149 
150 /// Get pointer operand of memory accessing instruction. If \p I is
151 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
152 /// is set to false and the instruction is volatile, return nullptr.
153 static const Value *getPointerOperand(const Instruction *I,
154                                       bool AllowVolatile) {
155   if (auto *LI = dyn_cast<LoadInst>(I)) {
156     if (!AllowVolatile && LI->isVolatile())
157       return nullptr;
158     return LI->getPointerOperand();
159   }
160 
161   if (auto *SI = dyn_cast<StoreInst>(I)) {
162     if (!AllowVolatile && SI->isVolatile())
163       return nullptr;
164     return SI->getPointerOperand();
165   }
166 
167   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
168     if (!AllowVolatile && CXI->isVolatile())
169       return nullptr;
170     return CXI->getPointerOperand();
171   }
172 
173   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
174     if (!AllowVolatile && RMWI->isVolatile())
175       return nullptr;
176     return RMWI->getPointerOperand();
177   }
178 
179   return nullptr;
180 }
181 
182 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
183 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
184 /// getelement pointer instructions that traverse the natural type of \p Ptr if
185 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
186 /// through a cast to i8*.
187 ///
188 /// TODO: This could probably live somewhere more prominantly if it doesn't
189 ///       already exist.
190 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
191                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
192   assert(Offset >= 0 && "Negative offset not supported yet!");
193   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
194                     << "-bytes as " << *ResTy << "\n");
195 
196   // The initial type we are trying to traverse to get nice GEPs.
197   Type *Ty = Ptr->getType();
198 
199   SmallVector<Value *, 4> Indices;
200   std::string GEPName = Ptr->getName().str();
201   while (Offset) {
202     uint64_t Idx, Rem;
203 
204     if (auto *STy = dyn_cast<StructType>(Ty)) {
205       const StructLayout *SL = DL.getStructLayout(STy);
206       if (int64_t(SL->getSizeInBytes()) < Offset)
207         break;
208       Idx = SL->getElementContainingOffset(Offset);
209       assert(Idx < STy->getNumElements() && "Offset calculation error!");
210       Rem = Offset - SL->getElementOffset(Idx);
211       Ty = STy->getElementType(Idx);
212     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
213       Ty = PTy->getElementType();
214       if (!Ty->isSized())
215         break;
216       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
217       assert(ElementSize && "Expected type with size!");
218       Idx = Offset / ElementSize;
219       Rem = Offset % ElementSize;
220     } else {
221       // Non-aggregate type, we cast and make byte-wise progress now.
222       break;
223     }
224 
225     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
226                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
227 
228     GEPName += "." + std::to_string(Idx);
229     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
230     Offset = Rem;
231   }
232 
233   // Create a GEP if we collected indices above.
234   if (Indices.size())
235     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
236 
237   // If an offset is left we use byte-wise adjustment.
238   if (Offset) {
239     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
240     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
241                         GEPName + ".b" + Twine(Offset));
242   }
243 
244   // Ensure the result has the requested type.
245   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
246 
247   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
248   return Ptr;
249 }
250 
251 /// Recursively visit all values that might become \p IRP at some point. This
252 /// will be done by looking through cast instructions, selects, phis, and calls
253 /// with the "returned" attribute. Once we cannot look through the value any
254 /// further, the callback \p VisitValueCB is invoked and passed the current
255 /// value, the \p State, and a flag to indicate if we stripped anything.
256 /// Stripped means that we unpacked the value associated with \p IRP at least
257 /// once. Note that the value used for the callback may still be the value
258 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
259 /// we will never visit more values than specified by \p MaxValues.
260 template <typename AAType, typename StateTy>
261 static bool genericValueTraversal(
262     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
263     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
264         VisitValueCB,
265     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
266     function_ref<Value *(Value *)> StripCB = nullptr) {
267 
268   const AAIsDead *LivenessAA = nullptr;
269   if (IRP.getAnchorScope())
270     LivenessAA = &A.getAAFor<AAIsDead>(
271         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
272         /* TrackDependence */ false);
273   bool AnyDead = false;
274 
275   using Item = std::pair<Value *, const Instruction *>;
276   SmallSet<Item, 16> Visited;
277   SmallVector<Item, 16> Worklist;
278   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
279 
280   int Iteration = 0;
281   do {
282     Item I = Worklist.pop_back_val();
283     Value *V = I.first;
284     CtxI = I.second;
285     if (StripCB)
286       V = StripCB(V);
287 
288     // Check if we should process the current value. To prevent endless
289     // recursion keep a record of the values we followed!
290     if (!Visited.insert(I).second)
291       continue;
292 
293     // Make sure we limit the compile time for complex expressions.
294     if (Iteration++ >= MaxValues)
295       return false;
296 
297     // Explicitly look through calls with a "returned" attribute if we do
298     // not have a pointer as stripPointerCasts only works on them.
299     Value *NewV = nullptr;
300     if (V->getType()->isPointerTy()) {
301       NewV = V->stripPointerCasts();
302     } else {
303       auto *CB = dyn_cast<CallBase>(V);
304       if (CB && CB->getCalledFunction()) {
305         for (Argument &Arg : CB->getCalledFunction()->args())
306           if (Arg.hasReturnedAttr()) {
307             NewV = CB->getArgOperand(Arg.getArgNo());
308             break;
309           }
310       }
311     }
312     if (NewV && NewV != V) {
313       Worklist.push_back({NewV, CtxI});
314       continue;
315     }
316 
317     // Look through select instructions, visit both potential values.
318     if (auto *SI = dyn_cast<SelectInst>(V)) {
319       Worklist.push_back({SI->getTrueValue(), CtxI});
320       Worklist.push_back({SI->getFalseValue(), CtxI});
321       continue;
322     }
323 
324     // Look through phi nodes, visit all live operands.
325     if (auto *PHI = dyn_cast<PHINode>(V)) {
326       assert(LivenessAA &&
327              "Expected liveness in the presence of instructions!");
328       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
329         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
330         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
331                             LivenessAA,
332                             /* CheckBBLivenessOnly */ true)) {
333           AnyDead = true;
334           continue;
335         }
336         Worklist.push_back(
337             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
338       }
339       continue;
340     }
341 
342     if (UseValueSimplify && !isa<Constant>(V)) {
343       bool UsedAssumedInformation = false;
344       Optional<Constant *> C =
345           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
346       if (!C.hasValue())
347         continue;
348       if (Value *NewV = C.getValue()) {
349         Worklist.push_back({NewV, CtxI});
350         continue;
351       }
352     }
353 
354     // Once a leaf is reached we inform the user through the callback.
355     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
356       return false;
357   } while (!Worklist.empty());
358 
359   // If we actually used liveness information so we have to record a dependence.
360   if (AnyDead)
361     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
362 
363   // All values have been visited.
364   return true;
365 }
366 
367 const Value *stripAndAccumulateMinimalOffsets(
368     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
369     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
370     bool UseAssumed = false) {
371 
372   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
373     const IRPosition &Pos = IRPosition::value(V);
374     // Only track dependence if we are going to use the assumed info.
375     const AAValueConstantRange &ValueConstantRangeAA =
376         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
377                                          /* TrackDependence */ UseAssumed);
378     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
379                                      : ValueConstantRangeAA.getKnown();
380     // We can only use the lower part of the range because the upper part can
381     // be higher than what the value can really be.
382     ROffset = Range.getSignedMin();
383     return true;
384   };
385 
386   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
387                                                 AttributorAnalysis);
388 }
389 
390 static const Value *getMinimalBaseOfAccsesPointerOperand(
391     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
392     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
393   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
394   if (!Ptr)
395     return nullptr;
396   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
397   const Value *Base = stripAndAccumulateMinimalOffsets(
398       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
399 
400   BytesOffset = OffsetAPInt.getSExtValue();
401   return Base;
402 }
403 
404 static const Value *
405 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
406                                      const DataLayout &DL,
407                                      bool AllowNonInbounds = false) {
408   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
409   if (!Ptr)
410     return nullptr;
411 
412   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
413                                           AllowNonInbounds);
414 }
415 
416 /// Helper function to clamp a state \p S of type \p StateType with the
417 /// information in \p R and indicate/return if \p S did change (as-in update is
418 /// required to be run again).
419 template <typename StateType>
420 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
421   auto Assumed = S.getAssumed();
422   S ^= R;
423   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
424                                    : ChangeStatus::CHANGED;
425 }
426 
427 /// Clamp the information known for all returned values of a function
428 /// (identified by \p QueryingAA) into \p S.
429 template <typename AAType, typename StateType = typename AAType::StateType>
430 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
431                                      StateType &S) {
432   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
433                     << QueryingAA << " into " << S << "\n");
434 
435   assert((QueryingAA.getIRPosition().getPositionKind() ==
436               IRPosition::IRP_RETURNED ||
437           QueryingAA.getIRPosition().getPositionKind() ==
438               IRPosition::IRP_CALL_SITE_RETURNED) &&
439          "Can only clamp returned value states for a function returned or call "
440          "site returned position!");
441 
442   // Use an optional state as there might not be any return values and we want
443   // to join (IntegerState::operator&) the state of all there are.
444   Optional<StateType> T;
445 
446   // Callback for each possibly returned value.
447   auto CheckReturnValue = [&](Value &RV) -> bool {
448     const IRPosition &RVPos = IRPosition::value(RV);
449     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
450     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
451                       << " @ " << RVPos << "\n");
452     const StateType &AAS = static_cast<const StateType &>(AA.getState());
453     if (T.hasValue())
454       *T &= AAS;
455     else
456       T = AAS;
457     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
458                       << "\n");
459     return T->isValidState();
460   };
461 
462   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
463     S.indicatePessimisticFixpoint();
464   else if (T.hasValue())
465     S ^= *T;
466 }
467 
468 /// Helper class for generic deduction: return value -> returned position.
469 template <typename AAType, typename BaseType,
470           typename StateType = typename BaseType::StateType>
471 struct AAReturnedFromReturnedValues : public BaseType {
472   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
473       : BaseType(IRP, A) {}
474 
475   /// See AbstractAttribute::updateImpl(...).
476   ChangeStatus updateImpl(Attributor &A) override {
477     StateType S(StateType::getBestState(this->getState()));
478     clampReturnedValueStates<AAType, StateType>(A, *this, S);
479     // TODO: If we know we visited all returned values, thus no are assumed
480     // dead, we can take the known information from the state T.
481     return clampStateAndIndicateChange<StateType>(this->getState(), S);
482   }
483 };
484 
485 /// Clamp the information known at all call sites for a given argument
486 /// (identified by \p QueryingAA) into \p S.
487 template <typename AAType, typename StateType = typename AAType::StateType>
488 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
489                                         StateType &S) {
490   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
491                     << QueryingAA << " into " << S << "\n");
492 
493   assert(QueryingAA.getIRPosition().getPositionKind() ==
494              IRPosition::IRP_ARGUMENT &&
495          "Can only clamp call site argument states for an argument position!");
496 
497   // Use an optional state as there might not be any return values and we want
498   // to join (IntegerState::operator&) the state of all there are.
499   Optional<StateType> T;
500 
501   // The argument number which is also the call site argument number.
502   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
503 
504   auto CallSiteCheck = [&](AbstractCallSite ACS) {
505     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
506     // Check if a coresponding argument was found or if it is on not associated
507     // (which can happen for callback calls).
508     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
509       return false;
510 
511     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
512     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
513                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
514     const StateType &AAS = static_cast<const StateType &>(AA.getState());
515     if (T.hasValue())
516       *T &= AAS;
517     else
518       T = AAS;
519     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
520                       << "\n");
521     return T->isValidState();
522   };
523 
524   bool AllCallSitesKnown;
525   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
526                               AllCallSitesKnown))
527     S.indicatePessimisticFixpoint();
528   else if (T.hasValue())
529     S ^= *T;
530 }
531 
532 /// Helper class for generic deduction: call site argument -> argument position.
533 template <typename AAType, typename BaseType,
534           typename StateType = typename AAType::StateType>
535 struct AAArgumentFromCallSiteArguments : public BaseType {
536   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
537       : BaseType(IRP, A) {}
538 
539   /// See AbstractAttribute::updateImpl(...).
540   ChangeStatus updateImpl(Attributor &A) override {
541     StateType S(StateType::getBestState(this->getState()));
542     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
543     // TODO: If we know we visited all incoming values, thus no are assumed
544     // dead, we can take the known information from the state T.
545     return clampStateAndIndicateChange<StateType>(this->getState(), S);
546   }
547 };
548 
549 /// Helper class for generic replication: function returned -> cs returned.
550 template <typename AAType, typename BaseType,
551           typename StateType = typename BaseType::StateType>
552 struct AACallSiteReturnedFromReturned : public BaseType {
553   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
554       : BaseType(IRP, A) {}
555 
556   /// See AbstractAttribute::updateImpl(...).
557   ChangeStatus updateImpl(Attributor &A) override {
558     assert(this->getIRPosition().getPositionKind() ==
559                IRPosition::IRP_CALL_SITE_RETURNED &&
560            "Can only wrap function returned positions for call site returned "
561            "positions!");
562     auto &S = this->getState();
563 
564     const Function *AssociatedFunction =
565         this->getIRPosition().getAssociatedFunction();
566     if (!AssociatedFunction)
567       return S.indicatePessimisticFixpoint();
568 
569     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
570     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
571     return clampStateAndIndicateChange(
572         S, static_cast<const StateType &>(AA.getState()));
573   }
574 };
575 
576 /// Helper function to accumulate uses.
577 template <class AAType, typename StateType = typename AAType::StateType>
578 static void followUsesInContext(AAType &AA, Attributor &A,
579                                 MustBeExecutedContextExplorer &Explorer,
580                                 const Instruction *CtxI,
581                                 SetVector<const Use *> &Uses,
582                                 StateType &State) {
583   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
584   for (unsigned u = 0; u < Uses.size(); ++u) {
585     const Use *U = Uses[u];
586     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
587       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
588       if (Found && AA.followUseInMBEC(A, U, UserI, State))
589         for (const Use &Us : UserI->uses())
590           Uses.insert(&Us);
591     }
592   }
593 }
594 
595 /// Use the must-be-executed-context around \p I to add information into \p S.
596 /// The AAType class is required to have `followUseInMBEC` method with the
597 /// following signature and behaviour:
598 ///
599 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
600 /// U - Underlying use.
601 /// I - The user of the \p U.
602 /// Returns true if the value should be tracked transitively.
603 ///
604 template <class AAType, typename StateType = typename AAType::StateType>
605 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
606                              Instruction &CtxI) {
607 
608   // Container for (transitive) uses of the associated value.
609   SetVector<const Use *> Uses;
610   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
611     Uses.insert(&U);
612 
613   MustBeExecutedContextExplorer &Explorer =
614       A.getInfoCache().getMustBeExecutedContextExplorer();
615 
616   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
617 
618   if (S.isAtFixpoint())
619     return;
620 
621   SmallVector<const BranchInst *, 4> BrInsts;
622   auto Pred = [&](const Instruction *I) {
623     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
624       if (Br->isConditional())
625         BrInsts.push_back(Br);
626     return true;
627   };
628 
629   // Here, accumulate conditional branch instructions in the context. We
630   // explore the child paths and collect the known states. The disjunction of
631   // those states can be merged to its own state. Let ParentState_i be a state
632   // to indicate the known information for an i-th branch instruction in the
633   // context. ChildStates are created for its successors respectively.
634   //
635   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
636   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
637   //      ...
638   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
639   //
640   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
641   //
642   // FIXME: Currently, recursive branches are not handled. For example, we
643   // can't deduce that ptr must be dereferenced in below function.
644   //
645   // void f(int a, int c, int *ptr) {
646   //    if(a)
647   //      if (b) {
648   //        *ptr = 0;
649   //      } else {
650   //        *ptr = 1;
651   //      }
652   //    else {
653   //      if (b) {
654   //        *ptr = 0;
655   //      } else {
656   //        *ptr = 1;
657   //      }
658   //    }
659   // }
660 
661   Explorer.checkForAllContext(&CtxI, Pred);
662   for (const BranchInst *Br : BrInsts) {
663     StateType ParentState;
664 
665     // The known state of the parent state is a conjunction of children's
666     // known states so it is initialized with a best state.
667     ParentState.indicateOptimisticFixpoint();
668 
669     for (const BasicBlock *BB : Br->successors()) {
670       StateType ChildState;
671 
672       size_t BeforeSize = Uses.size();
673       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
674 
675       // Erase uses which only appear in the child.
676       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
677         It = Uses.erase(It);
678 
679       ParentState &= ChildState;
680     }
681 
682     // Use only known state.
683     S += ParentState;
684   }
685 }
686 
687 /// -----------------------NoUnwind Function Attribute--------------------------
688 
689 struct AANoUnwindImpl : AANoUnwind {
690   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
691 
692   const std::string getAsStr() const override {
693     return getAssumed() ? "nounwind" : "may-unwind";
694   }
695 
696   /// See AbstractAttribute::updateImpl(...).
697   ChangeStatus updateImpl(Attributor &A) override {
698     auto Opcodes = {
699         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
700         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
701         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
702 
703     auto CheckForNoUnwind = [&](Instruction &I) {
704       if (!I.mayThrow())
705         return true;
706 
707       if (const auto *CB = dyn_cast<CallBase>(&I)) {
708         const auto &NoUnwindAA =
709             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
710         return NoUnwindAA.isAssumedNoUnwind();
711       }
712       return false;
713     };
714 
715     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
716       return indicatePessimisticFixpoint();
717 
718     return ChangeStatus::UNCHANGED;
719   }
720 };
721 
722 struct AANoUnwindFunction final : public AANoUnwindImpl {
723   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
724       : AANoUnwindImpl(IRP, A) {}
725 
726   /// See AbstractAttribute::trackStatistics()
727   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
728 };
729 
730 /// NoUnwind attribute deduction for a call sites.
731 struct AANoUnwindCallSite final : AANoUnwindImpl {
732   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
733       : AANoUnwindImpl(IRP, A) {}
734 
735   /// See AbstractAttribute::initialize(...).
736   void initialize(Attributor &A) override {
737     AANoUnwindImpl::initialize(A);
738     Function *F = getAssociatedFunction();
739     if (!F)
740       indicatePessimisticFixpoint();
741   }
742 
743   /// See AbstractAttribute::updateImpl(...).
744   ChangeStatus updateImpl(Attributor &A) override {
745     // TODO: Once we have call site specific value information we can provide
746     //       call site specific liveness information and then it makes
747     //       sense to specialize attributes for call sites arguments instead of
748     //       redirecting requests to the callee argument.
749     Function *F = getAssociatedFunction();
750     const IRPosition &FnPos = IRPosition::function(*F);
751     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
752     return clampStateAndIndicateChange(
753         getState(),
754         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
755   }
756 
757   /// See AbstractAttribute::trackStatistics()
758   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
759 };
760 
761 /// --------------------- Function Return Values -------------------------------
762 
763 /// "Attribute" that collects all potential returned values and the return
764 /// instructions that they arise from.
765 ///
766 /// If there is a unique returned value R, the manifest method will:
767 ///   - mark R with the "returned" attribute, if R is an argument.
768 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
769 
770   /// Mapping of values potentially returned by the associated function to the
771   /// return instructions that might return them.
772   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
773 
774   /// Mapping to remember the number of returned values for a call site such
775   /// that we can avoid updates if nothing changed.
776   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
777 
778   /// Set of unresolved calls returned by the associated function.
779   SmallSetVector<CallBase *, 4> UnresolvedCalls;
780 
781   /// State flags
782   ///
783   ///{
784   bool IsFixed = false;
785   bool IsValidState = true;
786   ///}
787 
788 public:
789   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
790       : AAReturnedValues(IRP, A) {}
791 
792   /// See AbstractAttribute::initialize(...).
793   void initialize(Attributor &A) override {
794     // Reset the state.
795     IsFixed = false;
796     IsValidState = true;
797     ReturnedValues.clear();
798 
799     Function *F = getAssociatedFunction();
800     if (!F) {
801       indicatePessimisticFixpoint();
802       return;
803     }
804     assert(!F->getReturnType()->isVoidTy() &&
805            "Did not expect a void return type!");
806 
807     // The map from instruction opcodes to those instructions in the function.
808     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
809 
810     // Look through all arguments, if one is marked as returned we are done.
811     for (Argument &Arg : F->args()) {
812       if (Arg.hasReturnedAttr()) {
813         auto &ReturnInstSet = ReturnedValues[&Arg];
814         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
815           for (Instruction *RI : *Insts)
816             ReturnInstSet.insert(cast<ReturnInst>(RI));
817 
818         indicateOptimisticFixpoint();
819         return;
820       }
821     }
822 
823     if (!A.isFunctionIPOAmendable(*F))
824       indicatePessimisticFixpoint();
825   }
826 
827   /// See AbstractAttribute::manifest(...).
828   ChangeStatus manifest(Attributor &A) override;
829 
830   /// See AbstractAttribute::getState(...).
831   AbstractState &getState() override { return *this; }
832 
833   /// See AbstractAttribute::getState(...).
834   const AbstractState &getState() const override { return *this; }
835 
836   /// See AbstractAttribute::updateImpl(Attributor &A).
837   ChangeStatus updateImpl(Attributor &A) override;
838 
839   llvm::iterator_range<iterator> returned_values() override {
840     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
841   }
842 
843   llvm::iterator_range<const_iterator> returned_values() const override {
844     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
845   }
846 
847   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
848     return UnresolvedCalls;
849   }
850 
851   /// Return the number of potential return values, -1 if unknown.
852   size_t getNumReturnValues() const override {
853     return isValidState() ? ReturnedValues.size() : -1;
854   }
855 
856   /// Return an assumed unique return value if a single candidate is found. If
857   /// there cannot be one, return a nullptr. If it is not clear yet, return the
858   /// Optional::NoneType.
859   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
860 
861   /// See AbstractState::checkForAllReturnedValues(...).
862   bool checkForAllReturnedValuesAndReturnInsts(
863       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
864       const override;
865 
866   /// Pretty print the attribute similar to the IR representation.
867   const std::string getAsStr() const override;
868 
869   /// See AbstractState::isAtFixpoint().
870   bool isAtFixpoint() const override { return IsFixed; }
871 
872   /// See AbstractState::isValidState().
873   bool isValidState() const override { return IsValidState; }
874 
875   /// See AbstractState::indicateOptimisticFixpoint(...).
876   ChangeStatus indicateOptimisticFixpoint() override {
877     IsFixed = true;
878     return ChangeStatus::UNCHANGED;
879   }
880 
881   ChangeStatus indicatePessimisticFixpoint() override {
882     IsFixed = true;
883     IsValidState = false;
884     return ChangeStatus::CHANGED;
885   }
886 };
887 
888 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
889   ChangeStatus Changed = ChangeStatus::UNCHANGED;
890 
891   // Bookkeeping.
892   assert(isValidState());
893   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
894                   "Number of function with known return values");
895 
896   // Check if we have an assumed unique return value that we could manifest.
897   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
898 
899   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
900     return Changed;
901 
902   // Bookkeeping.
903   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
904                   "Number of function with unique return");
905 
906   // Callback to replace the uses of CB with the constant C.
907   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
908     if (CB.use_empty())
909       return ChangeStatus::UNCHANGED;
910     if (A.changeValueAfterManifest(CB, C))
911       return ChangeStatus::CHANGED;
912     return ChangeStatus::UNCHANGED;
913   };
914 
915   // If the assumed unique return value is an argument, annotate it.
916   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
917     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
918             getAssociatedFunction()->getReturnType())) {
919       getIRPosition() = IRPosition::argument(*UniqueRVArg);
920       Changed = IRAttribute::manifest(A);
921     }
922   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
923     // We can replace the returned value with the unique returned constant.
924     Value &AnchorValue = getAnchorValue();
925     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
926       for (const Use &U : F->uses())
927         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
928           if (CB->isCallee(&U)) {
929             Constant *RVCCast =
930                 CB->getType() == RVC->getType()
931                     ? RVC
932                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
933             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
934           }
935     } else {
936       assert(isa<CallBase>(AnchorValue) &&
937              "Expcected a function or call base anchor!");
938       Constant *RVCCast =
939           AnchorValue.getType() == RVC->getType()
940               ? RVC
941               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
942       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
943     }
944     if (Changed == ChangeStatus::CHANGED)
945       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
946                       "Number of function returns replaced by constant return");
947   }
948 
949   return Changed;
950 }
951 
952 const std::string AAReturnedValuesImpl::getAsStr() const {
953   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
954          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
955          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
956 }
957 
958 Optional<Value *>
959 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
960   // If checkForAllReturnedValues provides a unique value, ignoring potential
961   // undef values that can also be present, it is assumed to be the actual
962   // return value and forwarded to the caller of this method. If there are
963   // multiple, a nullptr is returned indicating there cannot be a unique
964   // returned value.
965   Optional<Value *> UniqueRV;
966 
967   auto Pred = [&](Value &RV) -> bool {
968     // If we found a second returned value and neither the current nor the saved
969     // one is an undef, there is no unique returned value. Undefs are special
970     // since we can pretend they have any value.
971     if (UniqueRV.hasValue() && UniqueRV != &RV &&
972         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
973       UniqueRV = nullptr;
974       return false;
975     }
976 
977     // Do not overwrite a value with an undef.
978     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
979       UniqueRV = &RV;
980 
981     return true;
982   };
983 
984   if (!A.checkForAllReturnedValues(Pred, *this))
985     UniqueRV = nullptr;
986 
987   return UniqueRV;
988 }
989 
990 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
991     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
992     const {
993   if (!isValidState())
994     return false;
995 
996   // Check all returned values but ignore call sites as long as we have not
997   // encountered an overdefined one during an update.
998   for (auto &It : ReturnedValues) {
999     Value *RV = It.first;
1000 
1001     CallBase *CB = dyn_cast<CallBase>(RV);
1002     if (CB && !UnresolvedCalls.count(CB))
1003       continue;
1004 
1005     if (!Pred(*RV, It.second))
1006       return false;
1007   }
1008 
1009   return true;
1010 }
1011 
1012 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1013   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1014   bool Changed = false;
1015 
1016   // State used in the value traversals starting in returned values.
1017   struct RVState {
1018     // The map in which we collect return values -> return instrs.
1019     decltype(ReturnedValues) &RetValsMap;
1020     // The flag to indicate a change.
1021     bool &Changed;
1022     // The return instrs we come from.
1023     SmallSetVector<ReturnInst *, 4> RetInsts;
1024   };
1025 
1026   // Callback for a leaf value returned by the associated function.
1027   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1028                          bool) -> bool {
1029     auto Size = RVS.RetValsMap[&Val].size();
1030     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1031     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1032     RVS.Changed |= Inserted;
1033     LLVM_DEBUG({
1034       if (Inserted)
1035         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1036                << " => " << RVS.RetInsts.size() << "\n";
1037     });
1038     return true;
1039   };
1040 
1041   // Helper method to invoke the generic value traversal.
1042   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1043                                 const Instruction *CtxI) {
1044     IRPosition RetValPos = IRPosition::value(RV);
1045     return genericValueTraversal<AAReturnedValues, RVState>(
1046         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1047         /* UseValueSimplify */ false);
1048   };
1049 
1050   // Callback for all "return intructions" live in the associated function.
1051   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1052     ReturnInst &Ret = cast<ReturnInst>(I);
1053     RVState RVS({ReturnedValues, Changed, {}});
1054     RVS.RetInsts.insert(&Ret);
1055     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1056   };
1057 
1058   // Start by discovering returned values from all live returned instructions in
1059   // the associated function.
1060   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1061     return indicatePessimisticFixpoint();
1062 
1063   // Once returned values "directly" present in the code are handled we try to
1064   // resolve returned calls. To avoid modifications to the ReturnedValues map
1065   // while we iterate over it we kept record of potential new entries in a copy
1066   // map, NewRVsMap.
1067   decltype(ReturnedValues) NewRVsMap;
1068 
1069   auto HandleReturnValue = [&](Value *RV,
1070                                SmallSetVector<ReturnInst *, 4> &RIs) {
1071     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1072                       << RIs.size() << " RIs\n");
1073     CallBase *CB = dyn_cast<CallBase>(RV);
1074     if (!CB || UnresolvedCalls.count(CB))
1075       return;
1076 
1077     if (!CB->getCalledFunction()) {
1078       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1079                         << "\n");
1080       UnresolvedCalls.insert(CB);
1081       return;
1082     }
1083 
1084     // TODO: use the function scope once we have call site AAReturnedValues.
1085     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1086         *this, IRPosition::function(*CB->getCalledFunction()));
1087     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1088                       << RetValAA << "\n");
1089 
1090     // Skip dead ends, thus if we do not know anything about the returned
1091     // call we mark it as unresolved and it will stay that way.
1092     if (!RetValAA.getState().isValidState()) {
1093       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1094                         << "\n");
1095       UnresolvedCalls.insert(CB);
1096       return;
1097     }
1098 
1099     // Do not try to learn partial information. If the callee has unresolved
1100     // return values we will treat the call as unresolved/opaque.
1101     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1102     if (!RetValAAUnresolvedCalls.empty()) {
1103       UnresolvedCalls.insert(CB);
1104       return;
1105     }
1106 
1107     // Now check if we can track transitively returned values. If possible, thus
1108     // if all return value can be represented in the current scope, do so.
1109     bool Unresolved = false;
1110     for (auto &RetValAAIt : RetValAA.returned_values()) {
1111       Value *RetVal = RetValAAIt.first;
1112       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1113           isa<Constant>(RetVal))
1114         continue;
1115       // Anything that did not fit in the above categories cannot be resolved,
1116       // mark the call as unresolved.
1117       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1118                            "cannot be translated: "
1119                         << *RetVal << "\n");
1120       UnresolvedCalls.insert(CB);
1121       Unresolved = true;
1122       break;
1123     }
1124 
1125     if (Unresolved)
1126       return;
1127 
1128     // Now track transitively returned values.
1129     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1130     if (NumRetAA == RetValAA.getNumReturnValues()) {
1131       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1132                            "changed since it was seen last\n");
1133       return;
1134     }
1135     NumRetAA = RetValAA.getNumReturnValues();
1136 
1137     for (auto &RetValAAIt : RetValAA.returned_values()) {
1138       Value *RetVal = RetValAAIt.first;
1139       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1140         // Arguments are mapped to call site operands and we begin the traversal
1141         // again.
1142         bool Unused = false;
1143         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1144         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1145         continue;
1146       } else if (isa<CallBase>(RetVal)) {
1147         // Call sites are resolved by the callee attribute over time, no need to
1148         // do anything for us.
1149         continue;
1150       } else if (isa<Constant>(RetVal)) {
1151         // Constants are valid everywhere, we can simply take them.
1152         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1153         continue;
1154       }
1155     }
1156   };
1157 
1158   for (auto &It : ReturnedValues)
1159     HandleReturnValue(It.first, It.second);
1160 
1161   // Because processing the new information can again lead to new return values
1162   // we have to be careful and iterate until this iteration is complete. The
1163   // idea is that we are in a stable state at the end of an update. All return
1164   // values have been handled and properly categorized. We might not update
1165   // again if we have not requested a non-fix attribute so we cannot "wait" for
1166   // the next update to analyze a new return value.
1167   while (!NewRVsMap.empty()) {
1168     auto It = std::move(NewRVsMap.back());
1169     NewRVsMap.pop_back();
1170 
1171     assert(!It.second.empty() && "Entry does not add anything.");
1172     auto &ReturnInsts = ReturnedValues[It.first];
1173     for (ReturnInst *RI : It.second)
1174       if (ReturnInsts.insert(RI)) {
1175         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1176                           << *It.first << " => " << *RI << "\n");
1177         HandleReturnValue(It.first, ReturnInsts);
1178         Changed = true;
1179       }
1180   }
1181 
1182   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1183   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1184 }
1185 
1186 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1187   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1188       : AAReturnedValuesImpl(IRP, A) {}
1189 
1190   /// See AbstractAttribute::trackStatistics()
1191   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1192 };
1193 
1194 /// Returned values information for a call sites.
1195 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1196   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1197       : AAReturnedValuesImpl(IRP, A) {}
1198 
1199   /// See AbstractAttribute::initialize(...).
1200   void initialize(Attributor &A) override {
1201     // TODO: Once we have call site specific value information we can provide
1202     //       call site specific liveness information and then it makes
1203     //       sense to specialize attributes for call sites instead of
1204     //       redirecting requests to the callee.
1205     llvm_unreachable("Abstract attributes for returned values are not "
1206                      "supported for call sites yet!");
1207   }
1208 
1209   /// See AbstractAttribute::updateImpl(...).
1210   ChangeStatus updateImpl(Attributor &A) override {
1211     return indicatePessimisticFixpoint();
1212   }
1213 
1214   /// See AbstractAttribute::trackStatistics()
1215   void trackStatistics() const override {}
1216 };
1217 
1218 /// ------------------------ NoSync Function Attribute -------------------------
1219 
1220 struct AANoSyncImpl : AANoSync {
1221   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1222 
1223   const std::string getAsStr() const override {
1224     return getAssumed() ? "nosync" : "may-sync";
1225   }
1226 
1227   /// See AbstractAttribute::updateImpl(...).
1228   ChangeStatus updateImpl(Attributor &A) override;
1229 
1230   /// Helper function used to determine whether an instruction is non-relaxed
1231   /// atomic. In other words, if an atomic instruction does not have unordered
1232   /// or monotonic ordering
1233   static bool isNonRelaxedAtomic(Instruction *I);
1234 
1235   /// Helper function used to determine whether an instruction is volatile.
1236   static bool isVolatile(Instruction *I);
1237 
1238   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1239   /// memset).
1240   static bool isNoSyncIntrinsic(Instruction *I);
1241 };
1242 
1243 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1244   if (!I->isAtomic())
1245     return false;
1246 
1247   AtomicOrdering Ordering;
1248   switch (I->getOpcode()) {
1249   case Instruction::AtomicRMW:
1250     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1251     break;
1252   case Instruction::Store:
1253     Ordering = cast<StoreInst>(I)->getOrdering();
1254     break;
1255   case Instruction::Load:
1256     Ordering = cast<LoadInst>(I)->getOrdering();
1257     break;
1258   case Instruction::Fence: {
1259     auto *FI = cast<FenceInst>(I);
1260     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1261       return false;
1262     Ordering = FI->getOrdering();
1263     break;
1264   }
1265   case Instruction::AtomicCmpXchg: {
1266     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1267     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1268     // Only if both are relaxed, than it can be treated as relaxed.
1269     // Otherwise it is non-relaxed.
1270     if (Success != AtomicOrdering::Unordered &&
1271         Success != AtomicOrdering::Monotonic)
1272       return true;
1273     if (Failure != AtomicOrdering::Unordered &&
1274         Failure != AtomicOrdering::Monotonic)
1275       return true;
1276     return false;
1277   }
1278   default:
1279     llvm_unreachable(
1280         "New atomic operations need to be known in the attributor.");
1281   }
1282 
1283   // Relaxed.
1284   if (Ordering == AtomicOrdering::Unordered ||
1285       Ordering == AtomicOrdering::Monotonic)
1286     return false;
1287   return true;
1288 }
1289 
1290 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1291 /// FIXME: We should ipmrove the handling of intrinsics.
1292 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1293   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1294     switch (II->getIntrinsicID()) {
1295     /// Element wise atomic memory intrinsics are can only be unordered,
1296     /// therefore nosync.
1297     case Intrinsic::memset_element_unordered_atomic:
1298     case Intrinsic::memmove_element_unordered_atomic:
1299     case Intrinsic::memcpy_element_unordered_atomic:
1300       return true;
1301     case Intrinsic::memset:
1302     case Intrinsic::memmove:
1303     case Intrinsic::memcpy:
1304       if (!cast<MemIntrinsic>(II)->isVolatile())
1305         return true;
1306       return false;
1307     default:
1308       return false;
1309     }
1310   }
1311   return false;
1312 }
1313 
1314 bool AANoSyncImpl::isVolatile(Instruction *I) {
1315   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1316 
1317   switch (I->getOpcode()) {
1318   case Instruction::AtomicRMW:
1319     return cast<AtomicRMWInst>(I)->isVolatile();
1320   case Instruction::Store:
1321     return cast<StoreInst>(I)->isVolatile();
1322   case Instruction::Load:
1323     return cast<LoadInst>(I)->isVolatile();
1324   case Instruction::AtomicCmpXchg:
1325     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1326   default:
1327     return false;
1328   }
1329 }
1330 
1331 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1332 
1333   auto CheckRWInstForNoSync = [&](Instruction &I) {
1334     /// We are looking for volatile instructions or Non-Relaxed atomics.
1335     /// FIXME: We should improve the handling of intrinsics.
1336 
1337     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1338       return true;
1339 
1340     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1341       if (CB->hasFnAttr(Attribute::NoSync))
1342         return true;
1343 
1344       const auto &NoSyncAA =
1345           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
1346       if (NoSyncAA.isAssumedNoSync())
1347         return true;
1348       return false;
1349     }
1350 
1351     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1352       return true;
1353 
1354     return false;
1355   };
1356 
1357   auto CheckForNoSync = [&](Instruction &I) {
1358     // At this point we handled all read/write effects and they are all
1359     // nosync, so they can be skipped.
1360     if (I.mayReadOrWriteMemory())
1361       return true;
1362 
1363     // non-convergent and readnone imply nosync.
1364     return !cast<CallBase>(I).isConvergent();
1365   };
1366 
1367   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1368       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1369     return indicatePessimisticFixpoint();
1370 
1371   return ChangeStatus::UNCHANGED;
1372 }
1373 
1374 struct AANoSyncFunction final : public AANoSyncImpl {
1375   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1376       : AANoSyncImpl(IRP, A) {}
1377 
1378   /// See AbstractAttribute::trackStatistics()
1379   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1380 };
1381 
1382 /// NoSync attribute deduction for a call sites.
1383 struct AANoSyncCallSite final : AANoSyncImpl {
1384   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1385       : AANoSyncImpl(IRP, A) {}
1386 
1387   /// See AbstractAttribute::initialize(...).
1388   void initialize(Attributor &A) override {
1389     AANoSyncImpl::initialize(A);
1390     Function *F = getAssociatedFunction();
1391     if (!F)
1392       indicatePessimisticFixpoint();
1393   }
1394 
1395   /// See AbstractAttribute::updateImpl(...).
1396   ChangeStatus updateImpl(Attributor &A) override {
1397     // TODO: Once we have call site specific value information we can provide
1398     //       call site specific liveness information and then it makes
1399     //       sense to specialize attributes for call sites arguments instead of
1400     //       redirecting requests to the callee argument.
1401     Function *F = getAssociatedFunction();
1402     const IRPosition &FnPos = IRPosition::function(*F);
1403     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1404     return clampStateAndIndicateChange(
1405         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1406   }
1407 
1408   /// See AbstractAttribute::trackStatistics()
1409   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1410 };
1411 
1412 /// ------------------------ No-Free Attributes ----------------------------
1413 
1414 struct AANoFreeImpl : public AANoFree {
1415   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1416 
1417   /// See AbstractAttribute::updateImpl(...).
1418   ChangeStatus updateImpl(Attributor &A) override {
1419     auto CheckForNoFree = [&](Instruction &I) {
1420       const auto &CB = cast<CallBase>(I);
1421       if (CB.hasFnAttr(Attribute::NoFree))
1422         return true;
1423 
1424       const auto &NoFreeAA =
1425           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1426       return NoFreeAA.isAssumedNoFree();
1427     };
1428 
1429     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1430       return indicatePessimisticFixpoint();
1431     return ChangeStatus::UNCHANGED;
1432   }
1433 
1434   /// See AbstractAttribute::getAsStr().
1435   const std::string getAsStr() const override {
1436     return getAssumed() ? "nofree" : "may-free";
1437   }
1438 };
1439 
1440 struct AANoFreeFunction final : public AANoFreeImpl {
1441   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1442       : AANoFreeImpl(IRP, A) {}
1443 
1444   /// See AbstractAttribute::trackStatistics()
1445   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1446 };
1447 
1448 /// NoFree attribute deduction for a call sites.
1449 struct AANoFreeCallSite final : AANoFreeImpl {
1450   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1451       : AANoFreeImpl(IRP, A) {}
1452 
1453   /// See AbstractAttribute::initialize(...).
1454   void initialize(Attributor &A) override {
1455     AANoFreeImpl::initialize(A);
1456     Function *F = getAssociatedFunction();
1457     if (!F)
1458       indicatePessimisticFixpoint();
1459   }
1460 
1461   /// See AbstractAttribute::updateImpl(...).
1462   ChangeStatus updateImpl(Attributor &A) override {
1463     // TODO: Once we have call site specific value information we can provide
1464     //       call site specific liveness information and then it makes
1465     //       sense to specialize attributes for call sites arguments instead of
1466     //       redirecting requests to the callee argument.
1467     Function *F = getAssociatedFunction();
1468     const IRPosition &FnPos = IRPosition::function(*F);
1469     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1470     return clampStateAndIndicateChange(
1471         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1472   }
1473 
1474   /// See AbstractAttribute::trackStatistics()
1475   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1476 };
1477 
1478 /// NoFree attribute for floating values.
1479 struct AANoFreeFloating : AANoFreeImpl {
1480   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1481       : AANoFreeImpl(IRP, A) {}
1482 
1483   /// See AbstractAttribute::trackStatistics()
1484   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1485 
1486   /// See Abstract Attribute::updateImpl(...).
1487   ChangeStatus updateImpl(Attributor &A) override {
1488     const IRPosition &IRP = getIRPosition();
1489 
1490     const auto &NoFreeAA =
1491         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1492     if (NoFreeAA.isAssumedNoFree())
1493       return ChangeStatus::UNCHANGED;
1494 
1495     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1496     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1497       Instruction *UserI = cast<Instruction>(U.getUser());
1498       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1499         if (CB->isBundleOperand(&U))
1500           return false;
1501         if (!CB->isArgOperand(&U))
1502           return true;
1503         unsigned ArgNo = CB->getArgOperandNo(&U);
1504 
1505         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1506             *this, IRPosition::callsite_argument(*CB, ArgNo));
1507         return NoFreeArg.isAssumedNoFree();
1508       }
1509 
1510       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1511           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1512         Follow = true;
1513         return true;
1514       }
1515       if (isa<ReturnInst>(UserI))
1516         return true;
1517 
1518       // Unknown user.
1519       return false;
1520     };
1521     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1522       return indicatePessimisticFixpoint();
1523 
1524     return ChangeStatus::UNCHANGED;
1525   }
1526 };
1527 
1528 /// NoFree attribute for a call site argument.
1529 struct AANoFreeArgument final : AANoFreeFloating {
1530   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1531       : AANoFreeFloating(IRP, A) {}
1532 
1533   /// See AbstractAttribute::trackStatistics()
1534   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1535 };
1536 
1537 /// NoFree attribute for call site arguments.
1538 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1539   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1540       : AANoFreeFloating(IRP, A) {}
1541 
1542   /// See AbstractAttribute::updateImpl(...).
1543   ChangeStatus updateImpl(Attributor &A) override {
1544     // TODO: Once we have call site specific value information we can provide
1545     //       call site specific liveness information and then it makes
1546     //       sense to specialize attributes for call sites arguments instead of
1547     //       redirecting requests to the callee argument.
1548     Argument *Arg = getAssociatedArgument();
1549     if (!Arg)
1550       return indicatePessimisticFixpoint();
1551     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1552     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1553     return clampStateAndIndicateChange(
1554         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1555   }
1556 
1557   /// See AbstractAttribute::trackStatistics()
1558   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1559 };
1560 
1561 /// NoFree attribute for function return value.
1562 struct AANoFreeReturned final : AANoFreeFloating {
1563   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1564       : AANoFreeFloating(IRP, A) {
1565     llvm_unreachable("NoFree is not applicable to function returns!");
1566   }
1567 
1568   /// See AbstractAttribute::initialize(...).
1569   void initialize(Attributor &A) override {
1570     llvm_unreachable("NoFree is not applicable to function returns!");
1571   }
1572 
1573   /// See AbstractAttribute::updateImpl(...).
1574   ChangeStatus updateImpl(Attributor &A) override {
1575     llvm_unreachable("NoFree is not applicable to function returns!");
1576   }
1577 
1578   /// See AbstractAttribute::trackStatistics()
1579   void trackStatistics() const override {}
1580 };
1581 
1582 /// NoFree attribute deduction for a call site return value.
1583 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1584   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1585       : AANoFreeFloating(IRP, A) {}
1586 
1587   ChangeStatus manifest(Attributor &A) override {
1588     return ChangeStatus::UNCHANGED;
1589   }
1590   /// See AbstractAttribute::trackStatistics()
1591   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1592 };
1593 
1594 /// ------------------------ NonNull Argument Attribute ------------------------
1595 static int64_t getKnownNonNullAndDerefBytesForUse(
1596     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1597     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1598   TrackUse = false;
1599 
1600   const Value *UseV = U->get();
1601   if (!UseV->getType()->isPointerTy())
1602     return 0;
1603 
1604   Type *PtrTy = UseV->getType();
1605   const Function *F = I->getFunction();
1606   bool NullPointerIsDefined =
1607       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1608   const DataLayout &DL = A.getInfoCache().getDL();
1609   if (const auto *CB = dyn_cast<CallBase>(I)) {
1610     if (CB->isBundleOperand(U)) {
1611       if (RetainedKnowledge RK = getKnowledgeFromUse(
1612               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1613         IsNonNull |=
1614             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1615         return RK.ArgValue;
1616       }
1617       return 0;
1618     }
1619 
1620     if (CB->isCallee(U)) {
1621       IsNonNull |= !NullPointerIsDefined;
1622       return 0;
1623     }
1624 
1625     unsigned ArgNo = CB->getArgOperandNo(U);
1626     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1627     // As long as we only use known information there is no need to track
1628     // dependences here.
1629     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1630                                                   /* TrackDependence */ false);
1631     IsNonNull |= DerefAA.isKnownNonNull();
1632     return DerefAA.getKnownDereferenceableBytes();
1633   }
1634 
1635   // We need to follow common pointer manipulation uses to the accesses they
1636   // feed into. We can try to be smart to avoid looking through things we do not
1637   // like for now, e.g., non-inbounds GEPs.
1638   if (isa<CastInst>(I)) {
1639     TrackUse = true;
1640     return 0;
1641   }
1642 
1643   if (isa<GetElementPtrInst>(I)) {
1644     TrackUse = true;
1645     return 0;
1646   }
1647 
1648   int64_t Offset;
1649   const Value *Base =
1650       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1651   if (Base) {
1652     if (Base == &AssociatedValue &&
1653         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1654       int64_t DerefBytes =
1655           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1656 
1657       IsNonNull |= !NullPointerIsDefined;
1658       return std::max(int64_t(0), DerefBytes);
1659     }
1660   }
1661 
1662   /// Corner case when an offset is 0.
1663   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1664                                               /*AllowNonInbounds*/ true);
1665   if (Base) {
1666     if (Offset == 0 && Base == &AssociatedValue &&
1667         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1668       int64_t DerefBytes =
1669           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1670       IsNonNull |= !NullPointerIsDefined;
1671       return std::max(int64_t(0), DerefBytes);
1672     }
1673   }
1674 
1675   return 0;
1676 }
1677 
1678 struct AANonNullImpl : AANonNull {
1679   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1680       : AANonNull(IRP, A),
1681         NullIsDefined(NullPointerIsDefined(
1682             getAnchorScope(),
1683             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1684 
1685   /// See AbstractAttribute::initialize(...).
1686   void initialize(Attributor &A) override {
1687     Value &V = getAssociatedValue();
1688     if (!NullIsDefined &&
1689         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1690                 /* IgnoreSubsumingPositions */ false, &A))
1691       indicateOptimisticFixpoint();
1692     else if (isa<ConstantPointerNull>(V))
1693       indicatePessimisticFixpoint();
1694     else
1695       AANonNull::initialize(A);
1696 
1697     bool CanBeNull = true;
1698     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull))
1699       if (!CanBeNull)
1700         indicateOptimisticFixpoint();
1701 
1702     if (!getState().isAtFixpoint())
1703       if (Instruction *CtxI = getCtxI())
1704         followUsesInMBEC(*this, A, getState(), *CtxI);
1705   }
1706 
1707   /// See followUsesInMBEC
1708   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1709                        AANonNull::StateType &State) {
1710     bool IsNonNull = false;
1711     bool TrackUse = false;
1712     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1713                                        IsNonNull, TrackUse);
1714     State.setKnown(IsNonNull);
1715     return TrackUse;
1716   }
1717 
1718   /// See AbstractAttribute::getAsStr().
1719   const std::string getAsStr() const override {
1720     return getAssumed() ? "nonnull" : "may-null";
1721   }
1722 
1723   /// Flag to determine if the underlying value can be null and still allow
1724   /// valid accesses.
1725   const bool NullIsDefined;
1726 };
1727 
1728 /// NonNull attribute for a floating value.
1729 struct AANonNullFloating : public AANonNullImpl {
1730   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1731       : AANonNullImpl(IRP, A) {}
1732 
1733   /// See AbstractAttribute::updateImpl(...).
1734   ChangeStatus updateImpl(Attributor &A) override {
1735     if (!NullIsDefined) {
1736       const auto &DerefAA =
1737           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1738       if (DerefAA.getAssumedDereferenceableBytes())
1739         return ChangeStatus::UNCHANGED;
1740     }
1741 
1742     const DataLayout &DL = A.getDataLayout();
1743 
1744     DominatorTree *DT = nullptr;
1745     AssumptionCache *AC = nullptr;
1746     InformationCache &InfoCache = A.getInfoCache();
1747     if (const Function *Fn = getAnchorScope()) {
1748       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1749       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1750     }
1751 
1752     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1753                             AANonNull::StateType &T, bool Stripped) -> bool {
1754       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1755       if (!Stripped && this == &AA) {
1756         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1757           T.indicatePessimisticFixpoint();
1758       } else {
1759         // Use abstract attribute information.
1760         const AANonNull::StateType &NS =
1761             static_cast<const AANonNull::StateType &>(AA.getState());
1762         T ^= NS;
1763       }
1764       return T.isValidState();
1765     };
1766 
1767     StateType T;
1768     if (!genericValueTraversal<AANonNull, StateType>(
1769             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1770       return indicatePessimisticFixpoint();
1771 
1772     return clampStateAndIndicateChange(getState(), T);
1773   }
1774 
1775   /// See AbstractAttribute::trackStatistics()
1776   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1777 };
1778 
1779 /// NonNull attribute for function return value.
1780 struct AANonNullReturned final
1781     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1782   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1783       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {}
1784 
1785   /// See AbstractAttribute::trackStatistics()
1786   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1787 };
1788 
1789 /// NonNull attribute for function argument.
1790 struct AANonNullArgument final
1791     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1792   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1793       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1794 
1795   /// See AbstractAttribute::trackStatistics()
1796   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1797 };
1798 
1799 struct AANonNullCallSiteArgument final : AANonNullFloating {
1800   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1801       : AANonNullFloating(IRP, A) {}
1802 
1803   /// See AbstractAttribute::trackStatistics()
1804   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1805 };
1806 
1807 /// NonNull attribute for a call site return position.
1808 struct AANonNullCallSiteReturned final
1809     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1810   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1811       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1812 
1813   /// See AbstractAttribute::trackStatistics()
1814   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1815 };
1816 
1817 /// ------------------------ No-Recurse Attributes ----------------------------
1818 
1819 struct AANoRecurseImpl : public AANoRecurse {
1820   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1821 
1822   /// See AbstractAttribute::getAsStr()
1823   const std::string getAsStr() const override {
1824     return getAssumed() ? "norecurse" : "may-recurse";
1825   }
1826 };
1827 
1828 struct AANoRecurseFunction final : AANoRecurseImpl {
1829   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1830       : AANoRecurseImpl(IRP, A) {}
1831 
1832   /// See AbstractAttribute::initialize(...).
1833   void initialize(Attributor &A) override {
1834     AANoRecurseImpl::initialize(A);
1835     if (const Function *F = getAnchorScope())
1836       if (A.getInfoCache().getSccSize(*F) != 1)
1837         indicatePessimisticFixpoint();
1838   }
1839 
1840   /// See AbstractAttribute::updateImpl(...).
1841   ChangeStatus updateImpl(Attributor &A) override {
1842 
1843     // If all live call sites are known to be no-recurse, we are as well.
1844     auto CallSitePred = [&](AbstractCallSite ACS) {
1845       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1846           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1847           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1848       return NoRecurseAA.isKnownNoRecurse();
1849     };
1850     bool AllCallSitesKnown;
1851     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1852       // If we know all call sites and all are known no-recurse, we are done.
1853       // If all known call sites, which might not be all that exist, are known
1854       // to be no-recurse, we are not done but we can continue to assume
1855       // no-recurse. If one of the call sites we have not visited will become
1856       // live, another update is triggered.
1857       if (AllCallSitesKnown)
1858         indicateOptimisticFixpoint();
1859       return ChangeStatus::UNCHANGED;
1860     }
1861 
1862     // If the above check does not hold anymore we look at the calls.
1863     auto CheckForNoRecurse = [&](Instruction &I) {
1864       const auto &CB = cast<CallBase>(I);
1865       if (CB.hasFnAttr(Attribute::NoRecurse))
1866         return true;
1867 
1868       const auto &NoRecurseAA =
1869           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1870       if (!NoRecurseAA.isAssumedNoRecurse())
1871         return false;
1872 
1873       // Recursion to the same function
1874       if (CB.getCalledFunction() == getAnchorScope())
1875         return false;
1876 
1877       return true;
1878     };
1879 
1880     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1881       return indicatePessimisticFixpoint();
1882     return ChangeStatus::UNCHANGED;
1883   }
1884 
1885   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1886 };
1887 
1888 /// NoRecurse attribute deduction for a call sites.
1889 struct AANoRecurseCallSite final : AANoRecurseImpl {
1890   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1891       : AANoRecurseImpl(IRP, A) {}
1892 
1893   /// See AbstractAttribute::initialize(...).
1894   void initialize(Attributor &A) override {
1895     AANoRecurseImpl::initialize(A);
1896     Function *F = getAssociatedFunction();
1897     if (!F)
1898       indicatePessimisticFixpoint();
1899   }
1900 
1901   /// See AbstractAttribute::updateImpl(...).
1902   ChangeStatus updateImpl(Attributor &A) override {
1903     // TODO: Once we have call site specific value information we can provide
1904     //       call site specific liveness information and then it makes
1905     //       sense to specialize attributes for call sites arguments instead of
1906     //       redirecting requests to the callee argument.
1907     Function *F = getAssociatedFunction();
1908     const IRPosition &FnPos = IRPosition::function(*F);
1909     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1910     return clampStateAndIndicateChange(
1911         getState(),
1912         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1913   }
1914 
1915   /// See AbstractAttribute::trackStatistics()
1916   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1917 };
1918 
1919 /// -------------------- Undefined-Behavior Attributes ------------------------
1920 
1921 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1922   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1923       : AAUndefinedBehavior(IRP, A) {}
1924 
1925   /// See AbstractAttribute::updateImpl(...).
1926   // through a pointer (i.e. also branches etc.)
1927   ChangeStatus updateImpl(Attributor &A) override {
1928     const size_t UBPrevSize = KnownUBInsts.size();
1929     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1930 
1931     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1932       // Skip instructions that are already saved.
1933       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1934         return true;
1935 
1936       // If we reach here, we know we have an instruction
1937       // that accesses memory through a pointer operand,
1938       // for which getPointerOperand() should give it to us.
1939       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1940       assert(PtrOp &&
1941              "Expected pointer operand of memory accessing instruction");
1942 
1943       // Either we stopped and the appropriate action was taken,
1944       // or we got back a simplified value to continue.
1945       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1946       if (!SimplifiedPtrOp.hasValue())
1947         return true;
1948       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1949 
1950       // A memory access through a pointer is considered UB
1951       // only if the pointer has constant null value.
1952       // TODO: Expand it to not only check constant values.
1953       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1954         AssumedNoUBInsts.insert(&I);
1955         return true;
1956       }
1957       const Type *PtrTy = PtrOpVal->getType();
1958 
1959       // Because we only consider instructions inside functions,
1960       // assume that a parent function exists.
1961       const Function *F = I.getFunction();
1962 
1963       // A memory access using constant null pointer is only considered UB
1964       // if null pointer is _not_ defined for the target platform.
1965       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1966         AssumedNoUBInsts.insert(&I);
1967       else
1968         KnownUBInsts.insert(&I);
1969       return true;
1970     };
1971 
1972     auto InspectBrInstForUB = [&](Instruction &I) {
1973       // A conditional branch instruction is considered UB if it has `undef`
1974       // condition.
1975 
1976       // Skip instructions that are already saved.
1977       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1978         return true;
1979 
1980       // We know we have a branch instruction.
1981       auto BrInst = cast<BranchInst>(&I);
1982 
1983       // Unconditional branches are never considered UB.
1984       if (BrInst->isUnconditional())
1985         return true;
1986 
1987       // Either we stopped and the appropriate action was taken,
1988       // or we got back a simplified value to continue.
1989       Optional<Value *> SimplifiedCond =
1990           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1991       if (!SimplifiedCond.hasValue())
1992         return true;
1993       AssumedNoUBInsts.insert(&I);
1994       return true;
1995     };
1996 
1997     auto InspectCallSiteForUB = [&](Instruction &I) {
1998       // Check whether a callsite always cause UB or not
1999 
2000       // Skip instructions that are already saved.
2001       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2002         return true;
2003 
2004       // Check nonnull and noundef argument attribute violation for each
2005       // callsite.
2006       CallBase &CB = cast<CallBase>(I);
2007       Function *Callee = CB.getCalledFunction();
2008       if (!Callee)
2009         return true;
2010       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2011         // If current argument is known to be simplified to null pointer and the
2012         // corresponding argument position is known to have nonnull attribute,
2013         // the argument is poison. Furthermore, if the argument is poison and
2014         // the position is known to have noundef attriubte, this callsite is
2015         // considered UB.
2016         // TODO: Check also nopoison attribute if it is introduced.
2017         if (idx >= Callee->arg_size())
2018           break;
2019         Value *ArgVal = CB.getArgOperand(idx);
2020         if (!ArgVal)
2021           continue;
2022         IRPosition CalleeArgumentIRP =
2023             IRPosition::argument(*Callee->getArg(idx));
2024         if (!CalleeArgumentIRP.hasAttr({Attribute::NoUndef}))
2025           continue;
2026         auto &NonNullAA = A.getAAFor<AANonNull>(*this, CalleeArgumentIRP);
2027         if (!NonNullAA.isKnownNonNull())
2028           continue;
2029         const auto &ValueSimplifyAA =
2030             A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*ArgVal));
2031         Optional<Value *> SimplifiedVal =
2032             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2033 
2034         if (!ValueSimplifyAA.isKnown())
2035           continue;
2036         // Here, we handle three cases.
2037         //   (1) Not having a value means it is dead. (we can replace the value
2038         //       with undef)
2039         //   (2) Simplified to null pointer. The argument is a poison value and
2040         //       violate noundef attribute.
2041         //   (3) Simplified to undef. The argument violate noundef attriubte.
2042         if (!SimplifiedVal.hasValue() ||
2043             isa<ConstantPointerNull>(*SimplifiedVal.getValue()) ||
2044             isa<UndefValue>(*SimplifiedVal.getValue())) {
2045           KnownUBInsts.insert(&I);
2046           return true;
2047         }
2048       }
2049       return true;
2050     };
2051 
2052     auto InspectReturnInstForUB =
2053         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2054           // Check if a return instruction always cause UB or not
2055           // Note: It is guaranteed that the returned position of the anchor
2056           //       scope has noundef attribute when this is called.
2057 
2058           // When the returned position has noundef attriubte, UB occur in the
2059           // following cases.
2060           //   (1) Returned value is known to be undef.
2061           //   (2) The value is known to be a null pointer and the returned
2062           //       position has nonnull attribute (because the returned value is
2063           //       poison).
2064           // Note: This callback is not called for a dead returned value because
2065           //       such values are ignored in
2066           //       checkForAllReturnedValuesAndReturnedInsts.
2067           bool FoundUB = false;
2068           if (isa<UndefValue>(V)) {
2069             FoundUB = true;
2070           } else {
2071             auto &NonNullAA = A.getAAFor<AANonNull>(
2072                 *this, IRPosition::returned(*getAnchorScope()));
2073             if (NonNullAA.isKnownNonNull() && isa<ConstantPointerNull>(V))
2074               FoundUB = true;
2075           }
2076 
2077           if (FoundUB)
2078             for (ReturnInst *RI : RetInsts)
2079               KnownUBInsts.insert(RI);
2080           return true;
2081         };
2082 
2083     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2084                               {Instruction::Load, Instruction::Store,
2085                                Instruction::AtomicCmpXchg,
2086                                Instruction::AtomicRMW},
2087                               /* CheckBBLivenessOnly */ true);
2088     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2089                               /* CheckBBLivenessOnly */ true);
2090     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2091 
2092     // If the returned position of the anchor scope has noundef attriubte, check
2093     // all returned instructions.
2094     // TODO: If AANoUndef is implemented, ask it here.
2095     if (IRPosition::returned(*getAnchorScope()).hasAttr({Attribute::NoUndef}))
2096       A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB, *this);
2097 
2098     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2099         UBPrevSize != KnownUBInsts.size())
2100       return ChangeStatus::CHANGED;
2101     return ChangeStatus::UNCHANGED;
2102   }
2103 
2104   bool isKnownToCauseUB(Instruction *I) const override {
2105     return KnownUBInsts.count(I);
2106   }
2107 
2108   bool isAssumedToCauseUB(Instruction *I) const override {
2109     // In simple words, if an instruction is not in the assumed to _not_
2110     // cause UB, then it is assumed UB (that includes those
2111     // in the KnownUBInsts set). The rest is boilerplate
2112     // is to ensure that it is one of the instructions we test
2113     // for UB.
2114 
2115     switch (I->getOpcode()) {
2116     case Instruction::Load:
2117     case Instruction::Store:
2118     case Instruction::AtomicCmpXchg:
2119     case Instruction::AtomicRMW:
2120       return !AssumedNoUBInsts.count(I);
2121     case Instruction::Br: {
2122       auto BrInst = cast<BranchInst>(I);
2123       if (BrInst->isUnconditional())
2124         return false;
2125       return !AssumedNoUBInsts.count(I);
2126     } break;
2127     default:
2128       return false;
2129     }
2130     return false;
2131   }
2132 
2133   ChangeStatus manifest(Attributor &A) override {
2134     if (KnownUBInsts.empty())
2135       return ChangeStatus::UNCHANGED;
2136     for (Instruction *I : KnownUBInsts)
2137       A.changeToUnreachableAfterManifest(I);
2138     return ChangeStatus::CHANGED;
2139   }
2140 
2141   /// See AbstractAttribute::getAsStr()
2142   const std::string getAsStr() const override {
2143     return getAssumed() ? "undefined-behavior" : "no-ub";
2144   }
2145 
2146   /// Note: The correctness of this analysis depends on the fact that the
2147   /// following 2 sets will stop changing after some point.
2148   /// "Change" here means that their size changes.
2149   /// The size of each set is monotonically increasing
2150   /// (we only add items to them) and it is upper bounded by the number of
2151   /// instructions in the processed function (we can never save more
2152   /// elements in either set than this number). Hence, at some point,
2153   /// they will stop increasing.
2154   /// Consequently, at some point, both sets will have stopped
2155   /// changing, effectively making the analysis reach a fixpoint.
2156 
2157   /// Note: These 2 sets are disjoint and an instruction can be considered
2158   /// one of 3 things:
2159   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2160   ///    the KnownUBInsts set.
2161   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2162   ///    has a reason to assume it).
2163   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2164   ///    could not find a reason to assume or prove that it can cause UB,
2165   ///    hence it assumes it doesn't. We have a set for these instructions
2166   ///    so that we don't reprocess them in every update.
2167   ///    Note however that instructions in this set may cause UB.
2168 
2169 protected:
2170   /// A set of all live instructions _known_ to cause UB.
2171   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2172 
2173 private:
2174   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2175   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2176 
2177   // Should be called on updates in which if we're processing an instruction
2178   // \p I that depends on a value \p V, one of the following has to happen:
2179   // - If the value is assumed, then stop.
2180   // - If the value is known but undef, then consider it UB.
2181   // - Otherwise, do specific processing with the simplified value.
2182   // We return None in the first 2 cases to signify that an appropriate
2183   // action was taken and the caller should stop.
2184   // Otherwise, we return the simplified value that the caller should
2185   // use for specific processing.
2186   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2187                                          Instruction *I) {
2188     const auto &ValueSimplifyAA =
2189         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2190     Optional<Value *> SimplifiedV =
2191         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2192     if (!ValueSimplifyAA.isKnown()) {
2193       // Don't depend on assumed values.
2194       return llvm::None;
2195     }
2196     if (!SimplifiedV.hasValue()) {
2197       // If it is known (which we tested above) but it doesn't have a value,
2198       // then we can assume `undef` and hence the instruction is UB.
2199       KnownUBInsts.insert(I);
2200       return llvm::None;
2201     }
2202     Value *Val = SimplifiedV.getValue();
2203     if (isa<UndefValue>(Val)) {
2204       KnownUBInsts.insert(I);
2205       return llvm::None;
2206     }
2207     return Val;
2208   }
2209 };
2210 
2211 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2212   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2213       : AAUndefinedBehaviorImpl(IRP, A) {}
2214 
2215   /// See AbstractAttribute::trackStatistics()
2216   void trackStatistics() const override {
2217     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2218                "Number of instructions known to have UB");
2219     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2220         KnownUBInsts.size();
2221   }
2222 };
2223 
2224 /// ------------------------ Will-Return Attributes ----------------------------
2225 
2226 // Helper function that checks whether a function has any cycle which we don't
2227 // know if it is bounded or not.
2228 // Loops with maximum trip count are considered bounded, any other cycle not.
2229 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2230   ScalarEvolution *SE =
2231       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2232   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2233   // If either SCEV or LoopInfo is not available for the function then we assume
2234   // any cycle to be unbounded cycle.
2235   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2236   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2237   if (!SE || !LI) {
2238     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2239       if (SCCI.hasCycle())
2240         return true;
2241     return false;
2242   }
2243 
2244   // If there's irreducible control, the function may contain non-loop cycles.
2245   if (mayContainIrreducibleControl(F, LI))
2246     return true;
2247 
2248   // Any loop that does not have a max trip count is considered unbounded cycle.
2249   for (auto *L : LI->getLoopsInPreorder()) {
2250     if (!SE->getSmallConstantMaxTripCount(L))
2251       return true;
2252   }
2253   return false;
2254 }
2255 
2256 struct AAWillReturnImpl : public AAWillReturn {
2257   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2258       : AAWillReturn(IRP, A) {}
2259 
2260   /// See AbstractAttribute::initialize(...).
2261   void initialize(Attributor &A) override {
2262     AAWillReturn::initialize(A);
2263 
2264     Function *F = getAnchorScope();
2265     if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
2266       indicatePessimisticFixpoint();
2267   }
2268 
2269   /// See AbstractAttribute::updateImpl(...).
2270   ChangeStatus updateImpl(Attributor &A) override {
2271     auto CheckForWillReturn = [&](Instruction &I) {
2272       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2273       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2274       if (WillReturnAA.isKnownWillReturn())
2275         return true;
2276       if (!WillReturnAA.isAssumedWillReturn())
2277         return false;
2278       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2279       return NoRecurseAA.isAssumedNoRecurse();
2280     };
2281 
2282     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2283       return indicatePessimisticFixpoint();
2284 
2285     return ChangeStatus::UNCHANGED;
2286   }
2287 
2288   /// See AbstractAttribute::getAsStr()
2289   const std::string getAsStr() const override {
2290     return getAssumed() ? "willreturn" : "may-noreturn";
2291   }
2292 };
2293 
2294 struct AAWillReturnFunction final : AAWillReturnImpl {
2295   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2296       : AAWillReturnImpl(IRP, A) {}
2297 
2298   /// See AbstractAttribute::trackStatistics()
2299   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2300 };
2301 
2302 /// WillReturn attribute deduction for a call sites.
2303 struct AAWillReturnCallSite final : AAWillReturnImpl {
2304   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2305       : AAWillReturnImpl(IRP, A) {}
2306 
2307   /// See AbstractAttribute::initialize(...).
2308   void initialize(Attributor &A) override {
2309     AAWillReturnImpl::initialize(A);
2310     Function *F = getAssociatedFunction();
2311     if (!F)
2312       indicatePessimisticFixpoint();
2313   }
2314 
2315   /// See AbstractAttribute::updateImpl(...).
2316   ChangeStatus updateImpl(Attributor &A) override {
2317     // TODO: Once we have call site specific value information we can provide
2318     //       call site specific liveness information and then it makes
2319     //       sense to specialize attributes for call sites arguments instead of
2320     //       redirecting requests to the callee argument.
2321     Function *F = getAssociatedFunction();
2322     const IRPosition &FnPos = IRPosition::function(*F);
2323     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2324     return clampStateAndIndicateChange(
2325         getState(),
2326         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2327   }
2328 
2329   /// See AbstractAttribute::trackStatistics()
2330   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2331 };
2332 
2333 /// -------------------AAReachability Attribute--------------------------
2334 
2335 struct AAReachabilityImpl : AAReachability {
2336   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2337       : AAReachability(IRP, A) {}
2338 
2339   const std::string getAsStr() const override {
2340     // TODO: Return the number of reachable queries.
2341     return "reachable";
2342   }
2343 
2344   /// See AbstractAttribute::initialize(...).
2345   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2346 
2347   /// See AbstractAttribute::updateImpl(...).
2348   ChangeStatus updateImpl(Attributor &A) override {
2349     return indicatePessimisticFixpoint();
2350   }
2351 };
2352 
2353 struct AAReachabilityFunction final : public AAReachabilityImpl {
2354   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2355       : AAReachabilityImpl(IRP, A) {}
2356 
2357   /// See AbstractAttribute::trackStatistics()
2358   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2359 };
2360 
2361 /// ------------------------ NoAlias Argument Attribute ------------------------
2362 
2363 struct AANoAliasImpl : AANoAlias {
2364   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2365     assert(getAssociatedType()->isPointerTy() &&
2366            "Noalias is a pointer attribute");
2367   }
2368 
2369   const std::string getAsStr() const override {
2370     return getAssumed() ? "noalias" : "may-alias";
2371   }
2372 };
2373 
2374 /// NoAlias attribute for a floating value.
2375 struct AANoAliasFloating final : AANoAliasImpl {
2376   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2377       : AANoAliasImpl(IRP, A) {}
2378 
2379   /// See AbstractAttribute::initialize(...).
2380   void initialize(Attributor &A) override {
2381     AANoAliasImpl::initialize(A);
2382     Value *Val = &getAssociatedValue();
2383     do {
2384       CastInst *CI = dyn_cast<CastInst>(Val);
2385       if (!CI)
2386         break;
2387       Value *Base = CI->getOperand(0);
2388       if (!Base->hasOneUse())
2389         break;
2390       Val = Base;
2391     } while (true);
2392 
2393     if (!Val->getType()->isPointerTy()) {
2394       indicatePessimisticFixpoint();
2395       return;
2396     }
2397 
2398     if (isa<AllocaInst>(Val))
2399       indicateOptimisticFixpoint();
2400     else if (isa<ConstantPointerNull>(Val) &&
2401              !NullPointerIsDefined(getAnchorScope(),
2402                                    Val->getType()->getPointerAddressSpace()))
2403       indicateOptimisticFixpoint();
2404     else if (Val != &getAssociatedValue()) {
2405       const auto &ValNoAliasAA =
2406           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2407       if (ValNoAliasAA.isKnownNoAlias())
2408         indicateOptimisticFixpoint();
2409     }
2410   }
2411 
2412   /// See AbstractAttribute::updateImpl(...).
2413   ChangeStatus updateImpl(Attributor &A) override {
2414     // TODO: Implement this.
2415     return indicatePessimisticFixpoint();
2416   }
2417 
2418   /// See AbstractAttribute::trackStatistics()
2419   void trackStatistics() const override {
2420     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2421   }
2422 };
2423 
2424 /// NoAlias attribute for an argument.
2425 struct AANoAliasArgument final
2426     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2427   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2428   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2429 
2430   /// See AbstractAttribute::initialize(...).
2431   void initialize(Attributor &A) override {
2432     Base::initialize(A);
2433     // See callsite argument attribute and callee argument attribute.
2434     if (hasAttr({Attribute::ByVal}))
2435       indicateOptimisticFixpoint();
2436   }
2437 
2438   /// See AbstractAttribute::update(...).
2439   ChangeStatus updateImpl(Attributor &A) override {
2440     // We have to make sure no-alias on the argument does not break
2441     // synchronization when this is a callback argument, see also [1] below.
2442     // If synchronization cannot be affected, we delegate to the base updateImpl
2443     // function, otherwise we give up for now.
2444 
2445     // If the function is no-sync, no-alias cannot break synchronization.
2446     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2447         *this, IRPosition::function_scope(getIRPosition()));
2448     if (NoSyncAA.isAssumedNoSync())
2449       return Base::updateImpl(A);
2450 
2451     // If the argument is read-only, no-alias cannot break synchronization.
2452     const auto &MemBehaviorAA =
2453         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2454     if (MemBehaviorAA.isAssumedReadOnly())
2455       return Base::updateImpl(A);
2456 
2457     // If the argument is never passed through callbacks, no-alias cannot break
2458     // synchronization.
2459     bool AllCallSitesKnown;
2460     if (A.checkForAllCallSites(
2461             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2462             true, AllCallSitesKnown))
2463       return Base::updateImpl(A);
2464 
2465     // TODO: add no-alias but make sure it doesn't break synchronization by
2466     // introducing fake uses. See:
2467     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2468     //     International Workshop on OpenMP 2018,
2469     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2470 
2471     return indicatePessimisticFixpoint();
2472   }
2473 
2474   /// See AbstractAttribute::trackStatistics()
2475   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2476 };
2477 
2478 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2479   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2480       : AANoAliasImpl(IRP, A) {}
2481 
2482   /// See AbstractAttribute::initialize(...).
2483   void initialize(Attributor &A) override {
2484     // See callsite argument attribute and callee argument attribute.
2485     const auto &CB = cast<CallBase>(getAnchorValue());
2486     if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias))
2487       indicateOptimisticFixpoint();
2488     Value &Val = getAssociatedValue();
2489     if (isa<ConstantPointerNull>(Val) &&
2490         !NullPointerIsDefined(getAnchorScope(),
2491                               Val.getType()->getPointerAddressSpace()))
2492       indicateOptimisticFixpoint();
2493   }
2494 
2495   /// Determine if the underlying value may alias with the call site argument
2496   /// \p OtherArgNo of \p ICS (= the underlying call site).
2497   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2498                             const AAMemoryBehavior &MemBehaviorAA,
2499                             const CallBase &CB, unsigned OtherArgNo) {
2500     // We do not need to worry about aliasing with the underlying IRP.
2501     if (this->getArgNo() == (int)OtherArgNo)
2502       return false;
2503 
2504     // If it is not a pointer or pointer vector we do not alias.
2505     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2506     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2507       return false;
2508 
2509     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2510         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2511         /* TrackDependence */ false);
2512 
2513     // If the argument is readnone, there is no read-write aliasing.
2514     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2515       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2516       return false;
2517     }
2518 
2519     // If the argument is readonly and the underlying value is readonly, there
2520     // is no read-write aliasing.
2521     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2522     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2523       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2524       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2525       return false;
2526     }
2527 
2528     // We have to utilize actual alias analysis queries so we need the object.
2529     if (!AAR)
2530       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2531 
2532     // Try to rule it out at the call site.
2533     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2534     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2535                          "callsite arguments: "
2536                       << getAssociatedValue() << " " << *ArgOp << " => "
2537                       << (IsAliasing ? "" : "no-") << "alias \n");
2538 
2539     return IsAliasing;
2540   }
2541 
2542   bool
2543   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2544                                          const AAMemoryBehavior &MemBehaviorAA,
2545                                          const AANoAlias &NoAliasAA) {
2546     // We can deduce "noalias" if the following conditions hold.
2547     // (i)   Associated value is assumed to be noalias in the definition.
2548     // (ii)  Associated value is assumed to be no-capture in all the uses
2549     //       possibly executed before this callsite.
2550     // (iii) There is no other pointer argument which could alias with the
2551     //       value.
2552 
2553     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2554     if (!AssociatedValueIsNoAliasAtDef) {
2555       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2556                         << " is not no-alias at the definition\n");
2557       return false;
2558     }
2559 
2560     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2561 
2562     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2563     auto &NoCaptureAA =
2564         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2565     // Check whether the value is captured in the scope using AANoCapture.
2566     //      Look at CFG and check only uses possibly executed before this
2567     //      callsite.
2568     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2569       Instruction *UserI = cast<Instruction>(U.getUser());
2570 
2571       // If user if curr instr and only use.
2572       if (UserI == getCtxI() && UserI->hasOneUse())
2573         return true;
2574 
2575       const Function *ScopeFn = VIRP.getAnchorScope();
2576       if (ScopeFn) {
2577         const auto &ReachabilityAA =
2578             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2579 
2580         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2581           return true;
2582 
2583         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2584           if (CB->isArgOperand(&U)) {
2585 
2586             unsigned ArgNo = CB->getArgOperandNo(&U);
2587 
2588             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2589                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2590 
2591             if (NoCaptureAA.isAssumedNoCapture())
2592               return true;
2593           }
2594         }
2595       }
2596 
2597       // For cases which can potentially have more users
2598       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2599           isa<SelectInst>(U)) {
2600         Follow = true;
2601         return true;
2602       }
2603 
2604       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2605       return false;
2606     };
2607 
2608     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2609       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2610         LLVM_DEBUG(
2611             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2612                    << " cannot be noalias as it is potentially captured\n");
2613         return false;
2614       }
2615     }
2616     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2617 
2618     // Check there is no other pointer argument which could alias with the
2619     // value passed at this call site.
2620     // TODO: AbstractCallSite
2621     const auto &CB = cast<CallBase>(getAnchorValue());
2622     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2623          OtherArgNo++)
2624       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2625         return false;
2626 
2627     return true;
2628   }
2629 
2630   /// See AbstractAttribute::updateImpl(...).
2631   ChangeStatus updateImpl(Attributor &A) override {
2632     // If the argument is readnone we are done as there are no accesses via the
2633     // argument.
2634     auto &MemBehaviorAA =
2635         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2636                                      /* TrackDependence */ false);
2637     if (MemBehaviorAA.isAssumedReadNone()) {
2638       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2639       return ChangeStatus::UNCHANGED;
2640     }
2641 
2642     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2643     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2644                                                   /* TrackDependence */ false);
2645 
2646     AAResults *AAR = nullptr;
2647     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2648                                                NoAliasAA)) {
2649       LLVM_DEBUG(
2650           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2651       return ChangeStatus::UNCHANGED;
2652     }
2653 
2654     return indicatePessimisticFixpoint();
2655   }
2656 
2657   /// See AbstractAttribute::trackStatistics()
2658   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2659 };
2660 
2661 /// NoAlias attribute for function return value.
2662 struct AANoAliasReturned final : AANoAliasImpl {
2663   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2664       : AANoAliasImpl(IRP, A) {}
2665 
2666   /// See AbstractAttribute::updateImpl(...).
2667   virtual ChangeStatus updateImpl(Attributor &A) override {
2668 
2669     auto CheckReturnValue = [&](Value &RV) -> bool {
2670       if (Constant *C = dyn_cast<Constant>(&RV))
2671         if (C->isNullValue() || isa<UndefValue>(C))
2672           return true;
2673 
2674       /// For now, we can only deduce noalias if we have call sites.
2675       /// FIXME: add more support.
2676       if (!isa<CallBase>(&RV))
2677         return false;
2678 
2679       const IRPosition &RVPos = IRPosition::value(RV);
2680       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2681       if (!NoAliasAA.isAssumedNoAlias())
2682         return false;
2683 
2684       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2685       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2686     };
2687 
2688     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2689       return indicatePessimisticFixpoint();
2690 
2691     return ChangeStatus::UNCHANGED;
2692   }
2693 
2694   /// See AbstractAttribute::trackStatistics()
2695   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2696 };
2697 
2698 /// NoAlias attribute deduction for a call site return value.
2699 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2700   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2701       : AANoAliasImpl(IRP, A) {}
2702 
2703   /// See AbstractAttribute::initialize(...).
2704   void initialize(Attributor &A) override {
2705     AANoAliasImpl::initialize(A);
2706     Function *F = getAssociatedFunction();
2707     if (!F)
2708       indicatePessimisticFixpoint();
2709   }
2710 
2711   /// See AbstractAttribute::updateImpl(...).
2712   ChangeStatus updateImpl(Attributor &A) override {
2713     // TODO: Once we have call site specific value information we can provide
2714     //       call site specific liveness information and then it makes
2715     //       sense to specialize attributes for call sites arguments instead of
2716     //       redirecting requests to the callee argument.
2717     Function *F = getAssociatedFunction();
2718     const IRPosition &FnPos = IRPosition::returned(*F);
2719     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2720     return clampStateAndIndicateChange(
2721         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2722   }
2723 
2724   /// See AbstractAttribute::trackStatistics()
2725   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2726 };
2727 
2728 /// -------------------AAIsDead Function Attribute-----------------------
2729 
2730 struct AAIsDeadValueImpl : public AAIsDead {
2731   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2732 
2733   /// See AAIsDead::isAssumedDead().
2734   bool isAssumedDead() const override { return getAssumed(); }
2735 
2736   /// See AAIsDead::isKnownDead().
2737   bool isKnownDead() const override { return getKnown(); }
2738 
2739   /// See AAIsDead::isAssumedDead(BasicBlock *).
2740   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2741 
2742   /// See AAIsDead::isKnownDead(BasicBlock *).
2743   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2744 
2745   /// See AAIsDead::isAssumedDead(Instruction *I).
2746   bool isAssumedDead(const Instruction *I) const override {
2747     return I == getCtxI() && isAssumedDead();
2748   }
2749 
2750   /// See AAIsDead::isKnownDead(Instruction *I).
2751   bool isKnownDead(const Instruction *I) const override {
2752     return isAssumedDead(I) && getKnown();
2753   }
2754 
2755   /// See AbstractAttribute::getAsStr().
2756   const std::string getAsStr() const override {
2757     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2758   }
2759 
2760   /// Check if all uses are assumed dead.
2761   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2762     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2763     // Explicitly set the dependence class to required because we want a long
2764     // chain of N dependent instructions to be considered live as soon as one is
2765     // without going through N update cycles. This is not required for
2766     // correctness.
2767     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2768   }
2769 
2770   /// Determine if \p I is assumed to be side-effect free.
2771   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2772     if (!I || wouldInstructionBeTriviallyDead(I))
2773       return true;
2774 
2775     auto *CB = dyn_cast<CallBase>(I);
2776     if (!CB || isa<IntrinsicInst>(CB))
2777       return false;
2778 
2779     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2780     const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>(
2781         *this, CallIRP, /* TrackDependence */ false);
2782     if (!NoUnwindAA.isAssumedNoUnwind())
2783       return false;
2784     if (!NoUnwindAA.isKnownNoUnwind())
2785       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2786 
2787     const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>(
2788         *this, CallIRP, /* TrackDependence */ false);
2789     if (MemBehaviorAA.isAssumedReadOnly()) {
2790       if (!MemBehaviorAA.isKnownReadOnly())
2791         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2792       return true;
2793     }
2794     return false;
2795   }
2796 };
2797 
2798 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2799   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2800       : AAIsDeadValueImpl(IRP, A) {}
2801 
2802   /// See AbstractAttribute::initialize(...).
2803   void initialize(Attributor &A) override {
2804     if (isa<UndefValue>(getAssociatedValue())) {
2805       indicatePessimisticFixpoint();
2806       return;
2807     }
2808 
2809     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2810     if (!isAssumedSideEffectFree(A, I))
2811       indicatePessimisticFixpoint();
2812   }
2813 
2814   /// See AbstractAttribute::updateImpl(...).
2815   ChangeStatus updateImpl(Attributor &A) override {
2816     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2817     if (!isAssumedSideEffectFree(A, I))
2818       return indicatePessimisticFixpoint();
2819 
2820     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2821       return indicatePessimisticFixpoint();
2822     return ChangeStatus::UNCHANGED;
2823   }
2824 
2825   /// See AbstractAttribute::manifest(...).
2826   ChangeStatus manifest(Attributor &A) override {
2827     Value &V = getAssociatedValue();
2828     if (auto *I = dyn_cast<Instruction>(&V)) {
2829       // If we get here we basically know the users are all dead. We check if
2830       // isAssumedSideEffectFree returns true here again because it might not be
2831       // the case and only the users are dead but the instruction (=call) is
2832       // still needed.
2833       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2834         A.deleteAfterManifest(*I);
2835         return ChangeStatus::CHANGED;
2836       }
2837     }
2838     if (V.use_empty())
2839       return ChangeStatus::UNCHANGED;
2840 
2841     bool UsedAssumedInformation = false;
2842     Optional<Constant *> C =
2843         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2844     if (C.hasValue() && C.getValue())
2845       return ChangeStatus::UNCHANGED;
2846 
2847     // Replace the value with undef as it is dead but keep droppable uses around
2848     // as they provide information we don't want to give up on just yet.
2849     UndefValue &UV = *UndefValue::get(V.getType());
2850     bool AnyChange =
2851         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2852     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2853   }
2854 
2855   /// See AbstractAttribute::trackStatistics()
2856   void trackStatistics() const override {
2857     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2858   }
2859 };
2860 
2861 struct AAIsDeadArgument : public AAIsDeadFloating {
2862   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2863       : AAIsDeadFloating(IRP, A) {}
2864 
2865   /// See AbstractAttribute::initialize(...).
2866   void initialize(Attributor &A) override {
2867     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2868       indicatePessimisticFixpoint();
2869   }
2870 
2871   /// See AbstractAttribute::manifest(...).
2872   ChangeStatus manifest(Attributor &A) override {
2873     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2874     Argument &Arg = *getAssociatedArgument();
2875     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2876       if (A.registerFunctionSignatureRewrite(
2877               Arg, /* ReplacementTypes */ {},
2878               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2879               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2880         Arg.dropDroppableUses();
2881         return ChangeStatus::CHANGED;
2882       }
2883     return Changed;
2884   }
2885 
2886   /// See AbstractAttribute::trackStatistics()
2887   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2888 };
2889 
2890 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2891   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2892       : AAIsDeadValueImpl(IRP, A) {}
2893 
2894   /// See AbstractAttribute::initialize(...).
2895   void initialize(Attributor &A) override {
2896     if (isa<UndefValue>(getAssociatedValue()))
2897       indicatePessimisticFixpoint();
2898   }
2899 
2900   /// See AbstractAttribute::updateImpl(...).
2901   ChangeStatus updateImpl(Attributor &A) override {
2902     // TODO: Once we have call site specific value information we can provide
2903     //       call site specific liveness information and then it makes
2904     //       sense to specialize attributes for call sites arguments instead of
2905     //       redirecting requests to the callee argument.
2906     Argument *Arg = getAssociatedArgument();
2907     if (!Arg)
2908       return indicatePessimisticFixpoint();
2909     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2910     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2911     return clampStateAndIndicateChange(
2912         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2913   }
2914 
2915   /// See AbstractAttribute::manifest(...).
2916   ChangeStatus manifest(Attributor &A) override {
2917     CallBase &CB = cast<CallBase>(getAnchorValue());
2918     Use &U = CB.getArgOperandUse(getArgNo());
2919     assert(!isa<UndefValue>(U.get()) &&
2920            "Expected undef values to be filtered out!");
2921     UndefValue &UV = *UndefValue::get(U->getType());
2922     if (A.changeUseAfterManifest(U, UV))
2923       return ChangeStatus::CHANGED;
2924     return ChangeStatus::UNCHANGED;
2925   }
2926 
2927   /// See AbstractAttribute::trackStatistics()
2928   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2929 };
2930 
2931 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2932   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2933       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2934 
2935   /// See AAIsDead::isAssumedDead().
2936   bool isAssumedDead() const override {
2937     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2938   }
2939 
2940   /// See AbstractAttribute::initialize(...).
2941   void initialize(Attributor &A) override {
2942     if (isa<UndefValue>(getAssociatedValue())) {
2943       indicatePessimisticFixpoint();
2944       return;
2945     }
2946 
2947     // We track this separately as a secondary state.
2948     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2949   }
2950 
2951   /// See AbstractAttribute::updateImpl(...).
2952   ChangeStatus updateImpl(Attributor &A) override {
2953     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2954     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2955       IsAssumedSideEffectFree = false;
2956       Changed = ChangeStatus::CHANGED;
2957     }
2958 
2959     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2960       return indicatePessimisticFixpoint();
2961     return Changed;
2962   }
2963 
2964   /// See AbstractAttribute::trackStatistics()
2965   void trackStatistics() const override {
2966     if (IsAssumedSideEffectFree)
2967       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2968     else
2969       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2970   }
2971 
2972   /// See AbstractAttribute::getAsStr().
2973   const std::string getAsStr() const override {
2974     return isAssumedDead()
2975                ? "assumed-dead"
2976                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2977   }
2978 
2979 private:
2980   bool IsAssumedSideEffectFree;
2981 };
2982 
2983 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2984   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
2985       : AAIsDeadValueImpl(IRP, A) {}
2986 
2987   /// See AbstractAttribute::updateImpl(...).
2988   ChangeStatus updateImpl(Attributor &A) override {
2989 
2990     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
2991                               {Instruction::Ret});
2992 
2993     auto PredForCallSite = [&](AbstractCallSite ACS) {
2994       if (ACS.isCallbackCall() || !ACS.getInstruction())
2995         return false;
2996       return areAllUsesAssumedDead(A, *ACS.getInstruction());
2997     };
2998 
2999     bool AllCallSitesKnown;
3000     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3001                                 AllCallSitesKnown))
3002       return indicatePessimisticFixpoint();
3003 
3004     return ChangeStatus::UNCHANGED;
3005   }
3006 
3007   /// See AbstractAttribute::manifest(...).
3008   ChangeStatus manifest(Attributor &A) override {
3009     // TODO: Rewrite the signature to return void?
3010     bool AnyChange = false;
3011     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3012     auto RetInstPred = [&](Instruction &I) {
3013       ReturnInst &RI = cast<ReturnInst>(I);
3014       if (!isa<UndefValue>(RI.getReturnValue()))
3015         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3016       return true;
3017     };
3018     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3019     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3020   }
3021 
3022   /// See AbstractAttribute::trackStatistics()
3023   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3024 };
3025 
3026 struct AAIsDeadFunction : public AAIsDead {
3027   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3028 
3029   /// See AbstractAttribute::initialize(...).
3030   void initialize(Attributor &A) override {
3031     const Function *F = getAnchorScope();
3032     if (F && !F->isDeclaration()) {
3033       ToBeExploredFrom.insert(&F->getEntryBlock().front());
3034       assumeLive(A, F->getEntryBlock());
3035     }
3036   }
3037 
3038   /// See AbstractAttribute::getAsStr().
3039   const std::string getAsStr() const override {
3040     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3041            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3042            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3043            std::to_string(KnownDeadEnds.size()) + "]";
3044   }
3045 
3046   /// See AbstractAttribute::manifest(...).
3047   ChangeStatus manifest(Attributor &A) override {
3048     assert(getState().isValidState() &&
3049            "Attempted to manifest an invalid state!");
3050 
3051     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3052     Function &F = *getAnchorScope();
3053 
3054     if (AssumedLiveBlocks.empty()) {
3055       A.deleteAfterManifest(F);
3056       return ChangeStatus::CHANGED;
3057     }
3058 
3059     // Flag to determine if we can change an invoke to a call assuming the
3060     // callee is nounwind. This is not possible if the personality of the
3061     // function allows to catch asynchronous exceptions.
3062     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3063 
3064     KnownDeadEnds.set_union(ToBeExploredFrom);
3065     for (const Instruction *DeadEndI : KnownDeadEnds) {
3066       auto *CB = dyn_cast<CallBase>(DeadEndI);
3067       if (!CB)
3068         continue;
3069       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3070           *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true,
3071           DepClassTy::OPTIONAL);
3072       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3073       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3074         continue;
3075 
3076       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3077         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3078       else
3079         A.changeToUnreachableAfterManifest(
3080             const_cast<Instruction *>(DeadEndI->getNextNode()));
3081       HasChanged = ChangeStatus::CHANGED;
3082     }
3083 
3084     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3085     for (BasicBlock &BB : F)
3086       if (!AssumedLiveBlocks.count(&BB)) {
3087         A.deleteAfterManifest(BB);
3088         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3089       }
3090 
3091     return HasChanged;
3092   }
3093 
3094   /// See AbstractAttribute::updateImpl(...).
3095   ChangeStatus updateImpl(Attributor &A) override;
3096 
3097   /// See AbstractAttribute::trackStatistics()
3098   void trackStatistics() const override {}
3099 
3100   /// Returns true if the function is assumed dead.
3101   bool isAssumedDead() const override { return false; }
3102 
3103   /// See AAIsDead::isKnownDead().
3104   bool isKnownDead() const override { return false; }
3105 
3106   /// See AAIsDead::isAssumedDead(BasicBlock *).
3107   bool isAssumedDead(const BasicBlock *BB) const override {
3108     assert(BB->getParent() == getAnchorScope() &&
3109            "BB must be in the same anchor scope function.");
3110 
3111     if (!getAssumed())
3112       return false;
3113     return !AssumedLiveBlocks.count(BB);
3114   }
3115 
3116   /// See AAIsDead::isKnownDead(BasicBlock *).
3117   bool isKnownDead(const BasicBlock *BB) const override {
3118     return getKnown() && isAssumedDead(BB);
3119   }
3120 
3121   /// See AAIsDead::isAssumed(Instruction *I).
3122   bool isAssumedDead(const Instruction *I) const override {
3123     assert(I->getParent()->getParent() == getAnchorScope() &&
3124            "Instruction must be in the same anchor scope function.");
3125 
3126     if (!getAssumed())
3127       return false;
3128 
3129     // If it is not in AssumedLiveBlocks then it for sure dead.
3130     // Otherwise, it can still be after noreturn call in a live block.
3131     if (!AssumedLiveBlocks.count(I->getParent()))
3132       return true;
3133 
3134     // If it is not after a liveness barrier it is live.
3135     const Instruction *PrevI = I->getPrevNode();
3136     while (PrevI) {
3137       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3138         return true;
3139       PrevI = PrevI->getPrevNode();
3140     }
3141     return false;
3142   }
3143 
3144   /// See AAIsDead::isKnownDead(Instruction *I).
3145   bool isKnownDead(const Instruction *I) const override {
3146     return getKnown() && isAssumedDead(I);
3147   }
3148 
3149   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3150   /// that internal function called from \p BB should now be looked at.
3151   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3152     if (!AssumedLiveBlocks.insert(&BB).second)
3153       return false;
3154 
3155     // We assume that all of BB is (probably) live now and if there are calls to
3156     // internal functions we will assume that those are now live as well. This
3157     // is a performance optimization for blocks with calls to a lot of internal
3158     // functions. It can however cause dead functions to be treated as live.
3159     for (const Instruction &I : BB)
3160       if (const auto *CB = dyn_cast<CallBase>(&I))
3161         if (const Function *F = CB->getCalledFunction())
3162           if (F->hasLocalLinkage())
3163             A.markLiveInternalFunction(*F);
3164     return true;
3165   }
3166 
3167   /// Collection of instructions that need to be explored again, e.g., we
3168   /// did assume they do not transfer control to (one of their) successors.
3169   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3170 
3171   /// Collection of instructions that are known to not transfer control.
3172   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3173 
3174   /// Collection of all assumed live BasicBlocks.
3175   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3176 };
3177 
3178 static bool
3179 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3180                         AbstractAttribute &AA,
3181                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3182   const IRPosition &IPos = IRPosition::callsite_function(CB);
3183 
3184   const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3185       AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3186   if (NoReturnAA.isAssumedNoReturn())
3187     return !NoReturnAA.isKnownNoReturn();
3188   if (CB.isTerminator())
3189     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3190   else
3191     AliveSuccessors.push_back(CB.getNextNode());
3192   return false;
3193 }
3194 
3195 static bool
3196 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3197                         AbstractAttribute &AA,
3198                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3199   bool UsedAssumedInformation =
3200       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3201 
3202   // First, determine if we can change an invoke to a call assuming the
3203   // callee is nounwind. This is not possible if the personality of the
3204   // function allows to catch asynchronous exceptions.
3205   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3206     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3207   } else {
3208     const IRPosition &IPos = IRPosition::callsite_function(II);
3209     const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>(
3210         AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3211     if (AANoUnw.isAssumedNoUnwind()) {
3212       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3213     } else {
3214       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3215     }
3216   }
3217   return UsedAssumedInformation;
3218 }
3219 
3220 static bool
3221 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3222                         AbstractAttribute &AA,
3223                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3224   bool UsedAssumedInformation = false;
3225   if (BI.getNumSuccessors() == 1) {
3226     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3227   } else {
3228     Optional<ConstantInt *> CI = getAssumedConstantInt(
3229         A, *BI.getCondition(), AA, UsedAssumedInformation);
3230     if (!CI.hasValue()) {
3231       // No value yet, assume both edges are dead.
3232     } else if (CI.getValue()) {
3233       const BasicBlock *SuccBB =
3234           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3235       AliveSuccessors.push_back(&SuccBB->front());
3236     } else {
3237       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3238       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3239       UsedAssumedInformation = false;
3240     }
3241   }
3242   return UsedAssumedInformation;
3243 }
3244 
3245 static bool
3246 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3247                         AbstractAttribute &AA,
3248                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3249   bool UsedAssumedInformation = false;
3250   Optional<ConstantInt *> CI =
3251       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3252   if (!CI.hasValue()) {
3253     // No value yet, assume all edges are dead.
3254   } else if (CI.getValue()) {
3255     for (auto &CaseIt : SI.cases()) {
3256       if (CaseIt.getCaseValue() == CI.getValue()) {
3257         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3258         return UsedAssumedInformation;
3259       }
3260     }
3261     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3262     return UsedAssumedInformation;
3263   } else {
3264     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3265       AliveSuccessors.push_back(&SuccBB->front());
3266   }
3267   return UsedAssumedInformation;
3268 }
3269 
3270 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3271   ChangeStatus Change = ChangeStatus::UNCHANGED;
3272 
3273   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3274                     << getAnchorScope()->size() << "] BBs and "
3275                     << ToBeExploredFrom.size() << " exploration points and "
3276                     << KnownDeadEnds.size() << " known dead ends\n");
3277 
3278   // Copy and clear the list of instructions we need to explore from. It is
3279   // refilled with instructions the next update has to look at.
3280   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3281                                                ToBeExploredFrom.end());
3282   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3283 
3284   SmallVector<const Instruction *, 8> AliveSuccessors;
3285   while (!Worklist.empty()) {
3286     const Instruction *I = Worklist.pop_back_val();
3287     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3288 
3289     AliveSuccessors.clear();
3290 
3291     bool UsedAssumedInformation = false;
3292     switch (I->getOpcode()) {
3293     // TODO: look for (assumed) UB to backwards propagate "deadness".
3294     default:
3295       if (I->isTerminator()) {
3296         for (const BasicBlock *SuccBB : successors(I->getParent()))
3297           AliveSuccessors.push_back(&SuccBB->front());
3298       } else {
3299         AliveSuccessors.push_back(I->getNextNode());
3300       }
3301       break;
3302     case Instruction::Call:
3303       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3304                                                        *this, AliveSuccessors);
3305       break;
3306     case Instruction::Invoke:
3307       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3308                                                        *this, AliveSuccessors);
3309       break;
3310     case Instruction::Br:
3311       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3312                                                        *this, AliveSuccessors);
3313       break;
3314     case Instruction::Switch:
3315       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3316                                                        *this, AliveSuccessors);
3317       break;
3318     }
3319 
3320     if (UsedAssumedInformation) {
3321       NewToBeExploredFrom.insert(I);
3322     } else {
3323       Change = ChangeStatus::CHANGED;
3324       if (AliveSuccessors.empty() ||
3325           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3326         KnownDeadEnds.insert(I);
3327     }
3328 
3329     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3330                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3331                       << UsedAssumedInformation << "\n");
3332 
3333     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3334       if (!I->isTerminator()) {
3335         assert(AliveSuccessors.size() == 1 &&
3336                "Non-terminator expected to have a single successor!");
3337         Worklist.push_back(AliveSuccessor);
3338       } else {
3339         if (assumeLive(A, *AliveSuccessor->getParent()))
3340           Worklist.push_back(AliveSuccessor);
3341       }
3342     }
3343   }
3344 
3345   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3346 
3347   // If we know everything is live there is no need to query for liveness.
3348   // Instead, indicating a pessimistic fixpoint will cause the state to be
3349   // "invalid" and all queries to be answered conservatively without lookups.
3350   // To be in this state we have to (1) finished the exploration and (3) not
3351   // discovered any non-trivial dead end and (2) not ruled unreachable code
3352   // dead.
3353   if (ToBeExploredFrom.empty() &&
3354       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3355       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3356         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3357       }))
3358     return indicatePessimisticFixpoint();
3359   return Change;
3360 }
3361 
3362 /// Liveness information for a call sites.
3363 struct AAIsDeadCallSite final : AAIsDeadFunction {
3364   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3365       : AAIsDeadFunction(IRP, A) {}
3366 
3367   /// See AbstractAttribute::initialize(...).
3368   void initialize(Attributor &A) override {
3369     // TODO: Once we have call site specific value information we can provide
3370     //       call site specific liveness information and then it makes
3371     //       sense to specialize attributes for call sites instead of
3372     //       redirecting requests to the callee.
3373     llvm_unreachable("Abstract attributes for liveness are not "
3374                      "supported for call sites yet!");
3375   }
3376 
3377   /// See AbstractAttribute::updateImpl(...).
3378   ChangeStatus updateImpl(Attributor &A) override {
3379     return indicatePessimisticFixpoint();
3380   }
3381 
3382   /// See AbstractAttribute::trackStatistics()
3383   void trackStatistics() const override {}
3384 };
3385 
3386 /// -------------------- Dereferenceable Argument Attribute --------------------
3387 
3388 template <>
3389 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3390                                                      const DerefState &R) {
3391   ChangeStatus CS0 =
3392       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3393   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3394   return CS0 | CS1;
3395 }
3396 
3397 struct AADereferenceableImpl : AADereferenceable {
3398   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3399       : AADereferenceable(IRP, A) {}
3400   using StateType = DerefState;
3401 
3402   /// See AbstractAttribute::initialize(...).
3403   void initialize(Attributor &A) override {
3404     SmallVector<Attribute, 4> Attrs;
3405     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3406              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3407     for (const Attribute &Attr : Attrs)
3408       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3409 
3410     const IRPosition &IRP = this->getIRPosition();
3411     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP,
3412                                        /* TrackDependence */ false);
3413 
3414     bool CanBeNull;
3415     takeKnownDerefBytesMaximum(
3416         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3417             A.getDataLayout(), CanBeNull));
3418 
3419     bool IsFnInterface = IRP.isFnInterfaceKind();
3420     Function *FnScope = IRP.getAnchorScope();
3421     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3422       indicatePessimisticFixpoint();
3423       return;
3424     }
3425 
3426     if (Instruction *CtxI = getCtxI())
3427       followUsesInMBEC(*this, A, getState(), *CtxI);
3428   }
3429 
3430   /// See AbstractAttribute::getState()
3431   /// {
3432   StateType &getState() override { return *this; }
3433   const StateType &getState() const override { return *this; }
3434   /// }
3435 
3436   /// Helper function for collecting accessed bytes in must-be-executed-context
3437   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3438                               DerefState &State) {
3439     const Value *UseV = U->get();
3440     if (!UseV->getType()->isPointerTy())
3441       return;
3442 
3443     Type *PtrTy = UseV->getType();
3444     const DataLayout &DL = A.getDataLayout();
3445     int64_t Offset;
3446     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3447             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3448       if (Base == &getAssociatedValue() &&
3449           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3450         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3451         State.addAccessedBytes(Offset, Size);
3452       }
3453     }
3454     return;
3455   }
3456 
3457   /// See followUsesInMBEC
3458   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3459                        AADereferenceable::StateType &State) {
3460     bool IsNonNull = false;
3461     bool TrackUse = false;
3462     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3463         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3464     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3465                       << " for instruction " << *I << "\n");
3466 
3467     addAccessedBytesForUse(A, U, I, State);
3468     State.takeKnownDerefBytesMaximum(DerefBytes);
3469     return TrackUse;
3470   }
3471 
3472   /// See AbstractAttribute::manifest(...).
3473   ChangeStatus manifest(Attributor &A) override {
3474     ChangeStatus Change = AADereferenceable::manifest(A);
3475     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3476       removeAttrs({Attribute::DereferenceableOrNull});
3477       return ChangeStatus::CHANGED;
3478     }
3479     return Change;
3480   }
3481 
3482   void getDeducedAttributes(LLVMContext &Ctx,
3483                             SmallVectorImpl<Attribute> &Attrs) const override {
3484     // TODO: Add *_globally support
3485     if (isAssumedNonNull())
3486       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3487           Ctx, getAssumedDereferenceableBytes()));
3488     else
3489       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3490           Ctx, getAssumedDereferenceableBytes()));
3491   }
3492 
3493   /// See AbstractAttribute::getAsStr().
3494   const std::string getAsStr() const override {
3495     if (!getAssumedDereferenceableBytes())
3496       return "unknown-dereferenceable";
3497     return std::string("dereferenceable") +
3498            (isAssumedNonNull() ? "" : "_or_null") +
3499            (isAssumedGlobal() ? "_globally" : "") + "<" +
3500            std::to_string(getKnownDereferenceableBytes()) + "-" +
3501            std::to_string(getAssumedDereferenceableBytes()) + ">";
3502   }
3503 };
3504 
3505 /// Dereferenceable attribute for a floating value.
3506 struct AADereferenceableFloating : AADereferenceableImpl {
3507   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3508       : AADereferenceableImpl(IRP, A) {}
3509 
3510   /// See AbstractAttribute::updateImpl(...).
3511   ChangeStatus updateImpl(Attributor &A) override {
3512     const DataLayout &DL = A.getDataLayout();
3513 
3514     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3515                             bool Stripped) -> bool {
3516       unsigned IdxWidth =
3517           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3518       APInt Offset(IdxWidth, 0);
3519       const Value *Base =
3520           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3521 
3522       const auto &AA =
3523           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3524       int64_t DerefBytes = 0;
3525       if (!Stripped && this == &AA) {
3526         // Use IR information if we did not strip anything.
3527         // TODO: track globally.
3528         bool CanBeNull;
3529         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3530         T.GlobalState.indicatePessimisticFixpoint();
3531       } else {
3532         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3533         DerefBytes = DS.DerefBytesState.getAssumed();
3534         T.GlobalState &= DS.GlobalState;
3535       }
3536 
3537       // For now we do not try to "increase" dereferenceability due to negative
3538       // indices as we first have to come up with code to deal with loops and
3539       // for overflows of the dereferenceable bytes.
3540       int64_t OffsetSExt = Offset.getSExtValue();
3541       if (OffsetSExt < 0)
3542         OffsetSExt = 0;
3543 
3544       T.takeAssumedDerefBytesMinimum(
3545           std::max(int64_t(0), DerefBytes - OffsetSExt));
3546 
3547       if (this == &AA) {
3548         if (!Stripped) {
3549           // If nothing was stripped IR information is all we got.
3550           T.takeKnownDerefBytesMaximum(
3551               std::max(int64_t(0), DerefBytes - OffsetSExt));
3552           T.indicatePessimisticFixpoint();
3553         } else if (OffsetSExt > 0) {
3554           // If something was stripped but there is circular reasoning we look
3555           // for the offset. If it is positive we basically decrease the
3556           // dereferenceable bytes in a circluar loop now, which will simply
3557           // drive them down to the known value in a very slow way which we
3558           // can accelerate.
3559           T.indicatePessimisticFixpoint();
3560         }
3561       }
3562 
3563       return T.isValidState();
3564     };
3565 
3566     DerefState T;
3567     if (!genericValueTraversal<AADereferenceable, DerefState>(
3568             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3569       return indicatePessimisticFixpoint();
3570 
3571     return clampStateAndIndicateChange(getState(), T);
3572   }
3573 
3574   /// See AbstractAttribute::trackStatistics()
3575   void trackStatistics() const override {
3576     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3577   }
3578 };
3579 
3580 /// Dereferenceable attribute for a return value.
3581 struct AADereferenceableReturned final
3582     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3583   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3584       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3585             IRP, A) {}
3586 
3587   /// See AbstractAttribute::trackStatistics()
3588   void trackStatistics() const override {
3589     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3590   }
3591 };
3592 
3593 /// Dereferenceable attribute for an argument
3594 struct AADereferenceableArgument final
3595     : AAArgumentFromCallSiteArguments<AADereferenceable,
3596                                       AADereferenceableImpl> {
3597   using Base =
3598       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3599   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3600       : Base(IRP, A) {}
3601 
3602   /// See AbstractAttribute::trackStatistics()
3603   void trackStatistics() const override {
3604     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3605   }
3606 };
3607 
3608 /// Dereferenceable attribute for a call site argument.
3609 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3610   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3611       : AADereferenceableFloating(IRP, A) {}
3612 
3613   /// See AbstractAttribute::trackStatistics()
3614   void trackStatistics() const override {
3615     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3616   }
3617 };
3618 
3619 /// Dereferenceable attribute deduction for a call site return value.
3620 struct AADereferenceableCallSiteReturned final
3621     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3622   using Base =
3623       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3624   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3625       : Base(IRP, A) {}
3626 
3627   /// See AbstractAttribute::trackStatistics()
3628   void trackStatistics() const override {
3629     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3630   }
3631 };
3632 
3633 // ------------------------ Align Argument Attribute ------------------------
3634 
3635 static unsigned getKnownAlignForUse(Attributor &A,
3636                                     AbstractAttribute &QueryingAA,
3637                                     Value &AssociatedValue, const Use *U,
3638                                     const Instruction *I, bool &TrackUse) {
3639   // We need to follow common pointer manipulation uses to the accesses they
3640   // feed into.
3641   if (isa<CastInst>(I)) {
3642     // Follow all but ptr2int casts.
3643     TrackUse = !isa<PtrToIntInst>(I);
3644     return 0;
3645   }
3646   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3647     if (GEP->hasAllConstantIndices()) {
3648       TrackUse = true;
3649       return 0;
3650     }
3651   }
3652 
3653   MaybeAlign MA;
3654   if (const auto *CB = dyn_cast<CallBase>(I)) {
3655     if (CB->isBundleOperand(U) || CB->isCallee(U))
3656       return 0;
3657 
3658     unsigned ArgNo = CB->getArgOperandNo(U);
3659     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3660     // As long as we only use known information there is no need to track
3661     // dependences here.
3662     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3663                                         /* TrackDependence */ false);
3664     MA = MaybeAlign(AlignAA.getKnownAlign());
3665   }
3666 
3667   const DataLayout &DL = A.getDataLayout();
3668   const Value *UseV = U->get();
3669   if (auto *SI = dyn_cast<StoreInst>(I)) {
3670     if (SI->getPointerOperand() == UseV)
3671       MA = SI->getAlign();
3672   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3673     if (LI->getPointerOperand() == UseV)
3674       MA = LI->getAlign();
3675   }
3676 
3677   if (!MA || *MA <= 1)
3678     return 0;
3679 
3680   unsigned Alignment = MA->value();
3681   int64_t Offset;
3682 
3683   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3684     if (Base == &AssociatedValue) {
3685       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3686       // So we can say that the maximum power of two which is a divisor of
3687       // gcd(Offset, Alignment) is an alignment.
3688 
3689       uint32_t gcd =
3690           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3691       Alignment = llvm::PowerOf2Floor(gcd);
3692     }
3693   }
3694 
3695   return Alignment;
3696 }
3697 
3698 struct AAAlignImpl : AAAlign {
3699   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3700 
3701   /// See AbstractAttribute::initialize(...).
3702   void initialize(Attributor &A) override {
3703     SmallVector<Attribute, 4> Attrs;
3704     getAttrs({Attribute::Alignment}, Attrs);
3705     for (const Attribute &Attr : Attrs)
3706       takeKnownMaximum(Attr.getValueAsInt());
3707 
3708     Value &V = getAssociatedValue();
3709     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3710     //       use of the function pointer. This was caused by D73131. We want to
3711     //       avoid this for function pointers especially because we iterate
3712     //       their uses and int2ptr is not handled. It is not a correctness
3713     //       problem though!
3714     if (!V.getType()->getPointerElementType()->isFunctionTy())
3715       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3716 
3717     if (getIRPosition().isFnInterfaceKind() &&
3718         (!getAnchorScope() ||
3719          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3720       indicatePessimisticFixpoint();
3721       return;
3722     }
3723 
3724     if (Instruction *CtxI = getCtxI())
3725       followUsesInMBEC(*this, A, getState(), *CtxI);
3726   }
3727 
3728   /// See AbstractAttribute::manifest(...).
3729   ChangeStatus manifest(Attributor &A) override {
3730     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3731 
3732     // Check for users that allow alignment annotations.
3733     Value &AssociatedValue = getAssociatedValue();
3734     for (const Use &U : AssociatedValue.uses()) {
3735       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3736         if (SI->getPointerOperand() == &AssociatedValue)
3737           if (SI->getAlignment() < getAssumedAlign()) {
3738             STATS_DECLTRACK(AAAlign, Store,
3739                             "Number of times alignment added to a store");
3740             SI->setAlignment(Align(getAssumedAlign()));
3741             LoadStoreChanged = ChangeStatus::CHANGED;
3742           }
3743       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3744         if (LI->getPointerOperand() == &AssociatedValue)
3745           if (LI->getAlignment() < getAssumedAlign()) {
3746             LI->setAlignment(Align(getAssumedAlign()));
3747             STATS_DECLTRACK(AAAlign, Load,
3748                             "Number of times alignment added to a load");
3749             LoadStoreChanged = ChangeStatus::CHANGED;
3750           }
3751       }
3752     }
3753 
3754     ChangeStatus Changed = AAAlign::manifest(A);
3755 
3756     Align InheritAlign =
3757         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3758     if (InheritAlign >= getAssumedAlign())
3759       return LoadStoreChanged;
3760     return Changed | LoadStoreChanged;
3761   }
3762 
3763   // TODO: Provide a helper to determine the implied ABI alignment and check in
3764   //       the existing manifest method and a new one for AAAlignImpl that value
3765   //       to avoid making the alignment explicit if it did not improve.
3766 
3767   /// See AbstractAttribute::getDeducedAttributes
3768   virtual void
3769   getDeducedAttributes(LLVMContext &Ctx,
3770                        SmallVectorImpl<Attribute> &Attrs) const override {
3771     if (getAssumedAlign() > 1)
3772       Attrs.emplace_back(
3773           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3774   }
3775 
3776   /// See followUsesInMBEC
3777   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3778                        AAAlign::StateType &State) {
3779     bool TrackUse = false;
3780 
3781     unsigned int KnownAlign =
3782         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3783     State.takeKnownMaximum(KnownAlign);
3784 
3785     return TrackUse;
3786   }
3787 
3788   /// See AbstractAttribute::getAsStr().
3789   const std::string getAsStr() const override {
3790     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3791                                 "-" + std::to_string(getAssumedAlign()) + ">")
3792                              : "unknown-align";
3793   }
3794 };
3795 
3796 /// Align attribute for a floating value.
3797 struct AAAlignFloating : AAAlignImpl {
3798   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3799 
3800   /// See AbstractAttribute::updateImpl(...).
3801   ChangeStatus updateImpl(Attributor &A) override {
3802     const DataLayout &DL = A.getDataLayout();
3803 
3804     auto VisitValueCB = [&](Value &V, const Instruction *,
3805                             AAAlign::StateType &T, bool Stripped) -> bool {
3806       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3807       if (!Stripped && this == &AA) {
3808         // Use only IR information if we did not strip anything.
3809         Align PA = V.getPointerAlignment(DL);
3810         T.takeKnownMaximum(PA.value());
3811         T.indicatePessimisticFixpoint();
3812       } else {
3813         // Use abstract attribute information.
3814         const AAAlign::StateType &DS =
3815             static_cast<const AAAlign::StateType &>(AA.getState());
3816         T ^= DS;
3817       }
3818       return T.isValidState();
3819     };
3820 
3821     StateType T;
3822     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3823                                                    VisitValueCB, getCtxI()))
3824       return indicatePessimisticFixpoint();
3825 
3826     // TODO: If we know we visited all incoming values, thus no are assumed
3827     // dead, we can take the known information from the state T.
3828     return clampStateAndIndicateChange(getState(), T);
3829   }
3830 
3831   /// See AbstractAttribute::trackStatistics()
3832   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3833 };
3834 
3835 /// Align attribute for function return value.
3836 struct AAAlignReturned final
3837     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3838   AAAlignReturned(const IRPosition &IRP, Attributor &A)
3839       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {}
3840 
3841   /// See AbstractAttribute::trackStatistics()
3842   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3843 };
3844 
3845 /// Align attribute for function argument.
3846 struct AAAlignArgument final
3847     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3848   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3849   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3850 
3851   /// See AbstractAttribute::manifest(...).
3852   ChangeStatus manifest(Attributor &A) override {
3853     // If the associated argument is involved in a must-tail call we give up
3854     // because we would need to keep the argument alignments of caller and
3855     // callee in-sync. Just does not seem worth the trouble right now.
3856     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3857       return ChangeStatus::UNCHANGED;
3858     return Base::manifest(A);
3859   }
3860 
3861   /// See AbstractAttribute::trackStatistics()
3862   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3863 };
3864 
3865 struct AAAlignCallSiteArgument final : AAAlignFloating {
3866   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3867       : AAAlignFloating(IRP, A) {}
3868 
3869   /// See AbstractAttribute::manifest(...).
3870   ChangeStatus manifest(Attributor &A) override {
3871     // If the associated argument is involved in a must-tail call we give up
3872     // because we would need to keep the argument alignments of caller and
3873     // callee in-sync. Just does not seem worth the trouble right now.
3874     if (Argument *Arg = getAssociatedArgument())
3875       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3876         return ChangeStatus::UNCHANGED;
3877     ChangeStatus Changed = AAAlignImpl::manifest(A);
3878     Align InheritAlign =
3879         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3880     if (InheritAlign >= getAssumedAlign())
3881       Changed = ChangeStatus::UNCHANGED;
3882     return Changed;
3883   }
3884 
3885   /// See AbstractAttribute::updateImpl(Attributor &A).
3886   ChangeStatus updateImpl(Attributor &A) override {
3887     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3888     if (Argument *Arg = getAssociatedArgument()) {
3889       // We only take known information from the argument
3890       // so we do not need to track a dependence.
3891       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3892           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3893       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3894     }
3895     return Changed;
3896   }
3897 
3898   /// See AbstractAttribute::trackStatistics()
3899   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3900 };
3901 
3902 /// Align attribute deduction for a call site return value.
3903 struct AAAlignCallSiteReturned final
3904     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3905   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3906   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3907       : Base(IRP, A) {}
3908 
3909   /// See AbstractAttribute::initialize(...).
3910   void initialize(Attributor &A) override {
3911     Base::initialize(A);
3912     Function *F = getAssociatedFunction();
3913     if (!F)
3914       indicatePessimisticFixpoint();
3915   }
3916 
3917   /// See AbstractAttribute::trackStatistics()
3918   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3919 };
3920 
3921 /// ------------------ Function No-Return Attribute ----------------------------
3922 struct AANoReturnImpl : public AANoReturn {
3923   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3924 
3925   /// See AbstractAttribute::initialize(...).
3926   void initialize(Attributor &A) override {
3927     AANoReturn::initialize(A);
3928     Function *F = getAssociatedFunction();
3929     if (!F)
3930       indicatePessimisticFixpoint();
3931   }
3932 
3933   /// See AbstractAttribute::getAsStr().
3934   const std::string getAsStr() const override {
3935     return getAssumed() ? "noreturn" : "may-return";
3936   }
3937 
3938   /// See AbstractAttribute::updateImpl(Attributor &A).
3939   virtual ChangeStatus updateImpl(Attributor &A) override {
3940     auto CheckForNoReturn = [](Instruction &) { return false; };
3941     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3942                                    {(unsigned)Instruction::Ret}))
3943       return indicatePessimisticFixpoint();
3944     return ChangeStatus::UNCHANGED;
3945   }
3946 };
3947 
3948 struct AANoReturnFunction final : AANoReturnImpl {
3949   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
3950       : AANoReturnImpl(IRP, A) {}
3951 
3952   /// See AbstractAttribute::trackStatistics()
3953   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3954 };
3955 
3956 /// NoReturn attribute deduction for a call sites.
3957 struct AANoReturnCallSite final : AANoReturnImpl {
3958   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
3959       : AANoReturnImpl(IRP, A) {}
3960 
3961   /// See AbstractAttribute::updateImpl(...).
3962   ChangeStatus updateImpl(Attributor &A) override {
3963     // TODO: Once we have call site specific value information we can provide
3964     //       call site specific liveness information and then it makes
3965     //       sense to specialize attributes for call sites arguments instead of
3966     //       redirecting requests to the callee argument.
3967     Function *F = getAssociatedFunction();
3968     const IRPosition &FnPos = IRPosition::function(*F);
3969     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3970     return clampStateAndIndicateChange(
3971         getState(),
3972         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3973   }
3974 
3975   /// See AbstractAttribute::trackStatistics()
3976   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3977 };
3978 
3979 /// ----------------------- Variable Capturing ---------------------------------
3980 
3981 /// A class to hold the state of for no-capture attributes.
3982 struct AANoCaptureImpl : public AANoCapture {
3983   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
3984 
3985   /// See AbstractAttribute::initialize(...).
3986   void initialize(Attributor &A) override {
3987     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3988       indicateOptimisticFixpoint();
3989       return;
3990     }
3991     Function *AnchorScope = getAnchorScope();
3992     if (isFnInterfaceKind() &&
3993         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3994       indicatePessimisticFixpoint();
3995       return;
3996     }
3997 
3998     // You cannot "capture" null in the default address space.
3999     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4000         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4001       indicateOptimisticFixpoint();
4002       return;
4003     }
4004 
4005     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
4006 
4007     // Check what state the associated function can actually capture.
4008     if (F)
4009       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4010     else
4011       indicatePessimisticFixpoint();
4012   }
4013 
4014   /// See AbstractAttribute::updateImpl(...).
4015   ChangeStatus updateImpl(Attributor &A) override;
4016 
4017   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4018   virtual void
4019   getDeducedAttributes(LLVMContext &Ctx,
4020                        SmallVectorImpl<Attribute> &Attrs) const override {
4021     if (!isAssumedNoCaptureMaybeReturned())
4022       return;
4023 
4024     if (getArgNo() >= 0) {
4025       if (isAssumedNoCapture())
4026         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4027       else if (ManifestInternal)
4028         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4029     }
4030   }
4031 
4032   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4033   /// depending on the ability of the function associated with \p IRP to capture
4034   /// state in memory and through "returning/throwing", respectively.
4035   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4036                                                    const Function &F,
4037                                                    BitIntegerState &State) {
4038     // TODO: Once we have memory behavior attributes we should use them here.
4039 
4040     // If we know we cannot communicate or write to memory, we do not care about
4041     // ptr2int anymore.
4042     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4043         F.getReturnType()->isVoidTy()) {
4044       State.addKnownBits(NO_CAPTURE);
4045       return;
4046     }
4047 
4048     // A function cannot capture state in memory if it only reads memory, it can
4049     // however return/throw state and the state might be influenced by the
4050     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4051     if (F.onlyReadsMemory())
4052       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4053 
4054     // A function cannot communicate state back if it does not through
4055     // exceptions and doesn not return values.
4056     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4057       State.addKnownBits(NOT_CAPTURED_IN_RET);
4058 
4059     // Check existing "returned" attributes.
4060     int ArgNo = IRP.getArgNo();
4061     if (F.doesNotThrow() && ArgNo >= 0) {
4062       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4063         if (F.hasParamAttribute(u, Attribute::Returned)) {
4064           if (u == unsigned(ArgNo))
4065             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4066           else if (F.onlyReadsMemory())
4067             State.addKnownBits(NO_CAPTURE);
4068           else
4069             State.addKnownBits(NOT_CAPTURED_IN_RET);
4070           break;
4071         }
4072     }
4073   }
4074 
4075   /// See AbstractState::getAsStr().
4076   const std::string getAsStr() const override {
4077     if (isKnownNoCapture())
4078       return "known not-captured";
4079     if (isAssumedNoCapture())
4080       return "assumed not-captured";
4081     if (isKnownNoCaptureMaybeReturned())
4082       return "known not-captured-maybe-returned";
4083     if (isAssumedNoCaptureMaybeReturned())
4084       return "assumed not-captured-maybe-returned";
4085     return "assumed-captured";
4086   }
4087 };
4088 
4089 /// Attributor-aware capture tracker.
4090 struct AACaptureUseTracker final : public CaptureTracker {
4091 
4092   /// Create a capture tracker that can lookup in-flight abstract attributes
4093   /// through the Attributor \p A.
4094   ///
4095   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4096   /// search is stopped. If a use leads to a return instruction,
4097   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4098   /// If a use leads to a ptr2int which may capture the value,
4099   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4100   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4101   /// set. All values in \p PotentialCopies are later tracked as well. For every
4102   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4103   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4104   /// conservatively set to true.
4105   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4106                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4107                       SmallVectorImpl<const Value *> &PotentialCopies,
4108                       unsigned &RemainingUsesToExplore)
4109       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4110         PotentialCopies(PotentialCopies),
4111         RemainingUsesToExplore(RemainingUsesToExplore) {}
4112 
4113   /// Determine if \p V maybe captured. *Also updates the state!*
4114   bool valueMayBeCaptured(const Value *V) {
4115     if (V->getType()->isPointerTy()) {
4116       PointerMayBeCaptured(V, this);
4117     } else {
4118       State.indicatePessimisticFixpoint();
4119     }
4120     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4121   }
4122 
4123   /// See CaptureTracker::tooManyUses().
4124   void tooManyUses() override {
4125     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4126   }
4127 
4128   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4129     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4130       return true;
4131     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4132         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
4133         DepClassTy::OPTIONAL);
4134     return DerefAA.getAssumedDereferenceableBytes();
4135   }
4136 
4137   /// See CaptureTracker::captured(...).
4138   bool captured(const Use *U) override {
4139     Instruction *UInst = cast<Instruction>(U->getUser());
4140     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4141                       << "\n");
4142 
4143     // Because we may reuse the tracker multiple times we keep track of the
4144     // number of explored uses ourselves as well.
4145     if (RemainingUsesToExplore-- == 0) {
4146       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4147       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4148                           /* Return */ true);
4149     }
4150 
4151     // Deal with ptr2int by following uses.
4152     if (isa<PtrToIntInst>(UInst)) {
4153       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4154       return valueMayBeCaptured(UInst);
4155     }
4156 
4157     // Explicitly catch return instructions.
4158     if (isa<ReturnInst>(UInst))
4159       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4160                           /* Return */ true);
4161 
4162     // For now we only use special logic for call sites. However, the tracker
4163     // itself knows about a lot of other non-capturing cases already.
4164     auto *CB = dyn_cast<CallBase>(UInst);
4165     if (!CB || !CB->isArgOperand(U))
4166       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4167                           /* Return */ true);
4168 
4169     unsigned ArgNo = CB->getArgOperandNo(U);
4170     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4171     // If we have a abstract no-capture attribute for the argument we can use
4172     // it to justify a non-capture attribute here. This allows recursion!
4173     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4174     if (ArgNoCaptureAA.isAssumedNoCapture())
4175       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4176                           /* Return */ false);
4177     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4178       addPotentialCopy(*CB);
4179       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4180                           /* Return */ false);
4181     }
4182 
4183     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4184     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4185                         /* Return */ true);
4186   }
4187 
4188   /// Register \p CS as potential copy of the value we are checking.
4189   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4190 
4191   /// See CaptureTracker::shouldExplore(...).
4192   bool shouldExplore(const Use *U) override {
4193     // Check liveness and ignore droppable users.
4194     return !U->getUser()->isDroppable() &&
4195            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4196   }
4197 
4198   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4199   /// \p CapturedInRet, then return the appropriate value for use in the
4200   /// CaptureTracker::captured() interface.
4201   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4202                     bool CapturedInRet) {
4203     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4204                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4205     if (CapturedInMem)
4206       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4207     if (CapturedInInt)
4208       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4209     if (CapturedInRet)
4210       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4211     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4212   }
4213 
4214 private:
4215   /// The attributor providing in-flight abstract attributes.
4216   Attributor &A;
4217 
4218   /// The abstract attribute currently updated.
4219   AANoCapture &NoCaptureAA;
4220 
4221   /// The abstract liveness state.
4222   const AAIsDead &IsDeadAA;
4223 
4224   /// The state currently updated.
4225   AANoCapture::StateType &State;
4226 
4227   /// Set of potential copies of the tracked value.
4228   SmallVectorImpl<const Value *> &PotentialCopies;
4229 
4230   /// Global counter to limit the number of explored uses.
4231   unsigned &RemainingUsesToExplore;
4232 };
4233 
4234 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4235   const IRPosition &IRP = getIRPosition();
4236   const Value *V =
4237       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4238   if (!V)
4239     return indicatePessimisticFixpoint();
4240 
4241   const Function *F =
4242       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4243   assert(F && "Expected a function!");
4244   const IRPosition &FnPos = IRPosition::function(*F);
4245   const auto &IsDeadAA =
4246       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4247 
4248   AANoCapture::StateType T;
4249 
4250   // Readonly means we cannot capture through memory.
4251   const auto &FnMemAA =
4252       A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false);
4253   if (FnMemAA.isAssumedReadOnly()) {
4254     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4255     if (FnMemAA.isKnownReadOnly())
4256       addKnownBits(NOT_CAPTURED_IN_MEM);
4257     else
4258       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4259   }
4260 
4261   // Make sure all returned values are different than the underlying value.
4262   // TODO: we could do this in a more sophisticated way inside
4263   //       AAReturnedValues, e.g., track all values that escape through returns
4264   //       directly somehow.
4265   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4266     bool SeenConstant = false;
4267     for (auto &It : RVAA.returned_values()) {
4268       if (isa<Constant>(It.first)) {
4269         if (SeenConstant)
4270           return false;
4271         SeenConstant = true;
4272       } else if (!isa<Argument>(It.first) ||
4273                  It.first == getAssociatedArgument())
4274         return false;
4275     }
4276     return true;
4277   };
4278 
4279   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4280       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4281   if (NoUnwindAA.isAssumedNoUnwind()) {
4282     bool IsVoidTy = F->getReturnType()->isVoidTy();
4283     const AAReturnedValues *RVAA =
4284         IsVoidTy ? nullptr
4285                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4286                                                  /* TrackDependence */ true,
4287                                                  DepClassTy::OPTIONAL);
4288     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4289       T.addKnownBits(NOT_CAPTURED_IN_RET);
4290       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4291         return ChangeStatus::UNCHANGED;
4292       if (NoUnwindAA.isKnownNoUnwind() &&
4293           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4294         addKnownBits(NOT_CAPTURED_IN_RET);
4295         if (isKnown(NOT_CAPTURED_IN_MEM))
4296           return indicateOptimisticFixpoint();
4297       }
4298     }
4299   }
4300 
4301   // Use the CaptureTracker interface and logic with the specialized tracker,
4302   // defined in AACaptureUseTracker, that can look at in-flight abstract
4303   // attributes and directly updates the assumed state.
4304   SmallVector<const Value *, 4> PotentialCopies;
4305   unsigned RemainingUsesToExplore =
4306       getDefaultMaxUsesToExploreForCaptureTracking();
4307   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4308                               RemainingUsesToExplore);
4309 
4310   // Check all potential copies of the associated value until we can assume
4311   // none will be captured or we have to assume at least one might be.
4312   unsigned Idx = 0;
4313   PotentialCopies.push_back(V);
4314   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4315     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4316 
4317   AANoCapture::StateType &S = getState();
4318   auto Assumed = S.getAssumed();
4319   S.intersectAssumedBits(T.getAssumed());
4320   if (!isAssumedNoCaptureMaybeReturned())
4321     return indicatePessimisticFixpoint();
4322   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4323                                    : ChangeStatus::CHANGED;
4324 }
4325 
4326 /// NoCapture attribute for function arguments.
4327 struct AANoCaptureArgument final : AANoCaptureImpl {
4328   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4329       : AANoCaptureImpl(IRP, A) {}
4330 
4331   /// See AbstractAttribute::trackStatistics()
4332   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4333 };
4334 
4335 /// NoCapture attribute for call site arguments.
4336 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4337   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4338       : AANoCaptureImpl(IRP, A) {}
4339 
4340   /// See AbstractAttribute::initialize(...).
4341   void initialize(Attributor &A) override {
4342     if (Argument *Arg = getAssociatedArgument())
4343       if (Arg->hasByValAttr())
4344         indicateOptimisticFixpoint();
4345     AANoCaptureImpl::initialize(A);
4346   }
4347 
4348   /// See AbstractAttribute::updateImpl(...).
4349   ChangeStatus updateImpl(Attributor &A) override {
4350     // TODO: Once we have call site specific value information we can provide
4351     //       call site specific liveness information and then it makes
4352     //       sense to specialize attributes for call sites arguments instead of
4353     //       redirecting requests to the callee argument.
4354     Argument *Arg = getAssociatedArgument();
4355     if (!Arg)
4356       return indicatePessimisticFixpoint();
4357     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4358     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4359     return clampStateAndIndicateChange(
4360         getState(),
4361         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4362   }
4363 
4364   /// See AbstractAttribute::trackStatistics()
4365   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4366 };
4367 
4368 /// NoCapture attribute for floating values.
4369 struct AANoCaptureFloating final : AANoCaptureImpl {
4370   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4371       : AANoCaptureImpl(IRP, A) {}
4372 
4373   /// See AbstractAttribute::trackStatistics()
4374   void trackStatistics() const override {
4375     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4376   }
4377 };
4378 
4379 /// NoCapture attribute for function return value.
4380 struct AANoCaptureReturned final : AANoCaptureImpl {
4381   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4382       : AANoCaptureImpl(IRP, A) {
4383     llvm_unreachable("NoCapture is not applicable to function returns!");
4384   }
4385 
4386   /// See AbstractAttribute::initialize(...).
4387   void initialize(Attributor &A) override {
4388     llvm_unreachable("NoCapture is not applicable to function returns!");
4389   }
4390 
4391   /// See AbstractAttribute::updateImpl(...).
4392   ChangeStatus updateImpl(Attributor &A) override {
4393     llvm_unreachable("NoCapture is not applicable to function returns!");
4394   }
4395 
4396   /// See AbstractAttribute::trackStatistics()
4397   void trackStatistics() const override {}
4398 };
4399 
4400 /// NoCapture attribute deduction for a call site return value.
4401 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4402   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4403       : AANoCaptureImpl(IRP, A) {}
4404 
4405   /// See AbstractAttribute::trackStatistics()
4406   void trackStatistics() const override {
4407     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4408   }
4409 };
4410 
4411 /// ------------------ Value Simplify Attribute ----------------------------
4412 struct AAValueSimplifyImpl : AAValueSimplify {
4413   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4414       : AAValueSimplify(IRP, A) {}
4415 
4416   /// See AbstractAttribute::initialize(...).
4417   void initialize(Attributor &A) override {
4418     if (getAssociatedValue().getType()->isVoidTy())
4419       indicatePessimisticFixpoint();
4420   }
4421 
4422   /// See AbstractAttribute::getAsStr().
4423   const std::string getAsStr() const override {
4424     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4425                         : "not-simple";
4426   }
4427 
4428   /// See AbstractAttribute::trackStatistics()
4429   void trackStatistics() const override {}
4430 
4431   /// See AAValueSimplify::getAssumedSimplifiedValue()
4432   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4433     if (!getAssumed())
4434       return const_cast<Value *>(&getAssociatedValue());
4435     return SimplifiedAssociatedValue;
4436   }
4437 
4438   /// Helper function for querying AAValueSimplify and updating candicate.
4439   /// \param QueryingValue Value trying to unify with SimplifiedValue
4440   /// \param AccumulatedSimplifiedValue Current simplification result.
4441   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4442                              Value &QueryingValue,
4443                              Optional<Value *> &AccumulatedSimplifiedValue) {
4444     // FIXME: Add a typecast support.
4445 
4446     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4447         QueryingAA, IRPosition::value(QueryingValue));
4448 
4449     Optional<Value *> QueryingValueSimplified =
4450         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4451 
4452     if (!QueryingValueSimplified.hasValue())
4453       return true;
4454 
4455     if (!QueryingValueSimplified.getValue())
4456       return false;
4457 
4458     Value &QueryingValueSimplifiedUnwrapped =
4459         *QueryingValueSimplified.getValue();
4460 
4461     if (AccumulatedSimplifiedValue.hasValue() &&
4462         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4463         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4464       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4465     if (AccumulatedSimplifiedValue.hasValue() &&
4466         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4467       return true;
4468 
4469     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4470                       << " is assumed to be "
4471                       << QueryingValueSimplifiedUnwrapped << "\n");
4472 
4473     AccumulatedSimplifiedValue = QueryingValueSimplified;
4474     return true;
4475   }
4476 
4477   /// Returns a candidate is found or not
4478   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4479     if (!getAssociatedValue().getType()->isIntegerTy())
4480       return false;
4481 
4482     const auto &AA =
4483         A.getAAFor<AAType>(*this, getIRPosition(), /* TrackDependence */ false);
4484 
4485     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4486 
4487     if (!COpt.hasValue()) {
4488       SimplifiedAssociatedValue = llvm::None;
4489       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4490       return true;
4491     }
4492     if (auto *C = COpt.getValue()) {
4493       SimplifiedAssociatedValue = C;
4494       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4495       return true;
4496     }
4497     return false;
4498   }
4499 
4500   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4501     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4502       return true;
4503     if (askSimplifiedValueFor<AAPotentialValues>(A))
4504       return true;
4505     return false;
4506   }
4507 
4508   /// See AbstractAttribute::manifest(...).
4509   ChangeStatus manifest(Attributor &A) override {
4510     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4511 
4512     if (SimplifiedAssociatedValue.hasValue() &&
4513         !SimplifiedAssociatedValue.getValue())
4514       return Changed;
4515 
4516     Value &V = getAssociatedValue();
4517     auto *C = SimplifiedAssociatedValue.hasValue()
4518                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4519                   : UndefValue::get(V.getType());
4520     if (C) {
4521       // We can replace the AssociatedValue with the constant.
4522       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4523         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4524                           << " :: " << *this << "\n");
4525         if (A.changeValueAfterManifest(V, *C))
4526           Changed = ChangeStatus::CHANGED;
4527       }
4528     }
4529 
4530     return Changed | AAValueSimplify::manifest(A);
4531   }
4532 
4533   /// See AbstractState::indicatePessimisticFixpoint(...).
4534   ChangeStatus indicatePessimisticFixpoint() override {
4535     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4536     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4537     SimplifiedAssociatedValue = &getAssociatedValue();
4538     indicateOptimisticFixpoint();
4539     return ChangeStatus::CHANGED;
4540   }
4541 
4542 protected:
4543   // An assumed simplified value. Initially, it is set to Optional::None, which
4544   // means that the value is not clear under current assumption. If in the
4545   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4546   // returns orignal associated value.
4547   Optional<Value *> SimplifiedAssociatedValue;
4548 };
4549 
4550 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4551   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4552       : AAValueSimplifyImpl(IRP, A) {}
4553 
4554   void initialize(Attributor &A) override {
4555     AAValueSimplifyImpl::initialize(A);
4556     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4557       indicatePessimisticFixpoint();
4558     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4559                  Attribute::StructRet, Attribute::Nest},
4560                 /* IgnoreSubsumingPositions */ true))
4561       indicatePessimisticFixpoint();
4562 
4563     // FIXME: This is a hack to prevent us from propagating function poiner in
4564     // the new pass manager CGSCC pass as it creates call edges the
4565     // CallGraphUpdater cannot handle yet.
4566     Value &V = getAssociatedValue();
4567     if (V.getType()->isPointerTy() &&
4568         V.getType()->getPointerElementType()->isFunctionTy() &&
4569         !A.isModulePass())
4570       indicatePessimisticFixpoint();
4571   }
4572 
4573   /// See AbstractAttribute::updateImpl(...).
4574   ChangeStatus updateImpl(Attributor &A) override {
4575     // Byval is only replacable if it is readonly otherwise we would write into
4576     // the replaced value and not the copy that byval creates implicitly.
4577     Argument *Arg = getAssociatedArgument();
4578     if (Arg->hasByValAttr()) {
4579       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4580       //       there is no race by not copying a constant byval.
4581       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4582       if (!MemAA.isAssumedReadOnly())
4583         return indicatePessimisticFixpoint();
4584     }
4585 
4586     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4587 
4588     auto PredForCallSite = [&](AbstractCallSite ACS) {
4589       const IRPosition &ACSArgPos =
4590           IRPosition::callsite_argument(ACS, getArgNo());
4591       // Check if a coresponding argument was found or if it is on not
4592       // associated (which can happen for callback calls).
4593       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4594         return false;
4595 
4596       // We can only propagate thread independent values through callbacks.
4597       // This is different to direct/indirect call sites because for them we
4598       // know the thread executing the caller and callee is the same. For
4599       // callbacks this is not guaranteed, thus a thread dependent value could
4600       // be different for the caller and callee, making it invalid to propagate.
4601       Value &ArgOp = ACSArgPos.getAssociatedValue();
4602       if (ACS.isCallbackCall())
4603         if (auto *C = dyn_cast<Constant>(&ArgOp))
4604           if (C->isThreadDependent())
4605             return false;
4606       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4607     };
4608 
4609     bool AllCallSitesKnown;
4610     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4611                                 AllCallSitesKnown))
4612       if (!askSimplifiedValueForOtherAAs(A))
4613         return indicatePessimisticFixpoint();
4614 
4615     // If a candicate was found in this update, return CHANGED.
4616     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4617                ? ChangeStatus::UNCHANGED
4618                : ChangeStatus ::CHANGED;
4619   }
4620 
4621   /// See AbstractAttribute::trackStatistics()
4622   void trackStatistics() const override {
4623     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4624   }
4625 };
4626 
4627 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4628   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4629       : AAValueSimplifyImpl(IRP, A) {}
4630 
4631   /// See AbstractAttribute::updateImpl(...).
4632   ChangeStatus updateImpl(Attributor &A) override {
4633     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4634 
4635     auto PredForReturned = [&](Value &V) {
4636       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4637     };
4638 
4639     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4640       if (!askSimplifiedValueForOtherAAs(A))
4641         return indicatePessimisticFixpoint();
4642 
4643     // If a candicate was found in this update, return CHANGED.
4644     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4645                ? ChangeStatus::UNCHANGED
4646                : ChangeStatus ::CHANGED;
4647   }
4648 
4649   ChangeStatus manifest(Attributor &A) override {
4650     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4651 
4652     if (SimplifiedAssociatedValue.hasValue() &&
4653         !SimplifiedAssociatedValue.getValue())
4654       return Changed;
4655 
4656     Value &V = getAssociatedValue();
4657     auto *C = SimplifiedAssociatedValue.hasValue()
4658                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4659                   : UndefValue::get(V.getType());
4660     if (C) {
4661       auto PredForReturned =
4662           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4663             // We can replace the AssociatedValue with the constant.
4664             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4665               return true;
4666 
4667             for (ReturnInst *RI : RetInsts) {
4668               if (RI->getFunction() != getAnchorScope())
4669                 continue;
4670               auto *RC = C;
4671               if (RC->getType() != RI->getReturnValue()->getType())
4672                 RC = ConstantExpr::getBitCast(RC,
4673                                               RI->getReturnValue()->getType());
4674               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4675                                 << " in " << *RI << " :: " << *this << "\n");
4676               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4677                 Changed = ChangeStatus::CHANGED;
4678             }
4679             return true;
4680           };
4681       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4682     }
4683 
4684     return Changed | AAValueSimplify::manifest(A);
4685   }
4686 
4687   /// See AbstractAttribute::trackStatistics()
4688   void trackStatistics() const override {
4689     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4690   }
4691 };
4692 
4693 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4694   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4695       : AAValueSimplifyImpl(IRP, A) {}
4696 
4697   /// See AbstractAttribute::initialize(...).
4698   void initialize(Attributor &A) override {
4699     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4700     //        Needs investigation.
4701     // AAValueSimplifyImpl::initialize(A);
4702     Value &V = getAnchorValue();
4703 
4704     // TODO: add other stuffs
4705     if (isa<Constant>(V))
4706       indicatePessimisticFixpoint();
4707   }
4708 
4709   /// See AbstractAttribute::updateImpl(...).
4710   ChangeStatus updateImpl(Attributor &A) override {
4711     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4712 
4713     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4714                             bool Stripped) -> bool {
4715       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4716       if (!Stripped && this == &AA) {
4717         // TODO: Look the instruction and check recursively.
4718 
4719         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4720                           << "\n");
4721         return false;
4722       }
4723       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4724     };
4725 
4726     bool Dummy = false;
4727     if (!genericValueTraversal<AAValueSimplify, bool>(
4728             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4729             /* UseValueSimplify */ false))
4730       if (!askSimplifiedValueForOtherAAs(A))
4731         return indicatePessimisticFixpoint();
4732 
4733     // If a candicate was found in this update, return CHANGED.
4734 
4735     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4736                ? ChangeStatus::UNCHANGED
4737                : ChangeStatus ::CHANGED;
4738   }
4739 
4740   /// See AbstractAttribute::trackStatistics()
4741   void trackStatistics() const override {
4742     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4743   }
4744 };
4745 
4746 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4747   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4748       : AAValueSimplifyImpl(IRP, A) {}
4749 
4750   /// See AbstractAttribute::initialize(...).
4751   void initialize(Attributor &A) override {
4752     SimplifiedAssociatedValue = &getAnchorValue();
4753     indicateOptimisticFixpoint();
4754   }
4755   /// See AbstractAttribute::initialize(...).
4756   ChangeStatus updateImpl(Attributor &A) override {
4757     llvm_unreachable(
4758         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4759   }
4760   /// See AbstractAttribute::trackStatistics()
4761   void trackStatistics() const override {
4762     STATS_DECLTRACK_FN_ATTR(value_simplify)
4763   }
4764 };
4765 
4766 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4767   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4768       : AAValueSimplifyFunction(IRP, A) {}
4769   /// See AbstractAttribute::trackStatistics()
4770   void trackStatistics() const override {
4771     STATS_DECLTRACK_CS_ATTR(value_simplify)
4772   }
4773 };
4774 
4775 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4776   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4777       : AAValueSimplifyReturned(IRP, A) {}
4778 
4779   /// See AbstractAttribute::manifest(...).
4780   ChangeStatus manifest(Attributor &A) override {
4781     return AAValueSimplifyImpl::manifest(A);
4782   }
4783 
4784   void trackStatistics() const override {
4785     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4786   }
4787 };
4788 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4789   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4790       : AAValueSimplifyFloating(IRP, A) {}
4791 
4792   /// See AbstractAttribute::manifest(...).
4793   ChangeStatus manifest(Attributor &A) override {
4794     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4795 
4796     if (SimplifiedAssociatedValue.hasValue() &&
4797         !SimplifiedAssociatedValue.getValue())
4798       return Changed;
4799 
4800     Value &V = getAssociatedValue();
4801     auto *C = SimplifiedAssociatedValue.hasValue()
4802                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4803                   : UndefValue::get(V.getType());
4804     if (C) {
4805       Use &U = cast<CallBase>(&getAnchorValue())->getArgOperandUse(getArgNo());
4806       // We can replace the AssociatedValue with the constant.
4807       if (&V != C && V.getType() == C->getType()) {
4808         if (A.changeUseAfterManifest(U, *C))
4809           Changed = ChangeStatus::CHANGED;
4810       }
4811     }
4812 
4813     return Changed | AAValueSimplify::manifest(A);
4814   }
4815 
4816   void trackStatistics() const override {
4817     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4818   }
4819 };
4820 
4821 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4822 struct AAHeapToStackImpl : public AAHeapToStack {
4823   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4824       : AAHeapToStack(IRP, A) {}
4825 
4826   const std::string getAsStr() const override {
4827     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4828   }
4829 
4830   ChangeStatus manifest(Attributor &A) override {
4831     assert(getState().isValidState() &&
4832            "Attempted to manifest an invalid state!");
4833 
4834     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4835     Function *F = getAnchorScope();
4836     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4837 
4838     for (Instruction *MallocCall : MallocCalls) {
4839       // This malloc cannot be replaced.
4840       if (BadMallocCalls.count(MallocCall))
4841         continue;
4842 
4843       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4844         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4845         A.deleteAfterManifest(*FreeCall);
4846         HasChanged = ChangeStatus::CHANGED;
4847       }
4848 
4849       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4850                         << "\n");
4851 
4852       Align Alignment;
4853       Constant *Size;
4854       if (isCallocLikeFn(MallocCall, TLI)) {
4855         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4856         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4857         APInt TotalSize = SizeT->getValue() * Num->getValue();
4858         Size =
4859             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4860       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4861         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4862         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4863                                    ->getValue()
4864                                    .getZExtValue())
4865                         .valueOrOne();
4866       } else {
4867         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4868       }
4869 
4870       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4871       Instruction *AI =
4872           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4873                          "", MallocCall->getNextNode());
4874 
4875       if (AI->getType() != MallocCall->getType())
4876         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4877                              AI->getNextNode());
4878 
4879       A.changeValueAfterManifest(*MallocCall, *AI);
4880 
4881       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4882         auto *NBB = II->getNormalDest();
4883         BranchInst::Create(NBB, MallocCall->getParent());
4884         A.deleteAfterManifest(*MallocCall);
4885       } else {
4886         A.deleteAfterManifest(*MallocCall);
4887       }
4888 
4889       // Zero out the allocated memory if it was a calloc.
4890       if (isCallocLikeFn(MallocCall, TLI)) {
4891         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4892                                    AI->getNextNode());
4893         Value *Ops[] = {
4894             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4895             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4896 
4897         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4898         Module *M = F->getParent();
4899         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4900         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4901       }
4902       HasChanged = ChangeStatus::CHANGED;
4903     }
4904 
4905     return HasChanged;
4906   }
4907 
4908   /// Collection of all malloc calls in a function.
4909   SmallSetVector<Instruction *, 4> MallocCalls;
4910 
4911   /// Collection of malloc calls that cannot be converted.
4912   DenseSet<const Instruction *> BadMallocCalls;
4913 
4914   /// A map for each malloc call to the set of associated free calls.
4915   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4916 
4917   ChangeStatus updateImpl(Attributor &A) override;
4918 };
4919 
4920 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4921   const Function *F = getAnchorScope();
4922   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4923 
4924   MustBeExecutedContextExplorer &Explorer =
4925       A.getInfoCache().getMustBeExecutedContextExplorer();
4926 
4927   auto FreeCheck = [&](Instruction &I) {
4928     const auto &Frees = FreesForMalloc.lookup(&I);
4929     if (Frees.size() != 1)
4930       return false;
4931     Instruction *UniqueFree = *Frees.begin();
4932     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4933   };
4934 
4935   auto UsesCheck = [&](Instruction &I) {
4936     bool ValidUsesOnly = true;
4937     bool MustUse = true;
4938     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4939       Instruction *UserI = cast<Instruction>(U.getUser());
4940       if (isa<LoadInst>(UserI))
4941         return true;
4942       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4943         if (SI->getValueOperand() == U.get()) {
4944           LLVM_DEBUG(dbgs()
4945                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4946           ValidUsesOnly = false;
4947         } else {
4948           // A store into the malloc'ed memory is fine.
4949         }
4950         return true;
4951       }
4952       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4953         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4954           return true;
4955         // Record malloc.
4956         if (isFreeCall(UserI, TLI)) {
4957           if (MustUse) {
4958             FreesForMalloc[&I].insert(UserI);
4959           } else {
4960             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4961                               << *UserI << "\n");
4962             ValidUsesOnly = false;
4963           }
4964           return true;
4965         }
4966 
4967         unsigned ArgNo = CB->getArgOperandNo(&U);
4968 
4969         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4970             *this, IRPosition::callsite_argument(*CB, ArgNo));
4971 
4972         // If a callsite argument use is nofree, we are fine.
4973         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4974             *this, IRPosition::callsite_argument(*CB, ArgNo));
4975 
4976         if (!NoCaptureAA.isAssumedNoCapture() ||
4977             !ArgNoFreeAA.isAssumedNoFree()) {
4978           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4979           ValidUsesOnly = false;
4980         }
4981         return true;
4982       }
4983 
4984       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4985           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4986         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4987         Follow = true;
4988         return true;
4989       }
4990       // Unknown user for which we can not track uses further (in a way that
4991       // makes sense).
4992       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4993       ValidUsesOnly = false;
4994       return true;
4995     };
4996     A.checkForAllUses(Pred, *this, I);
4997     return ValidUsesOnly;
4998   };
4999 
5000   auto MallocCallocCheck = [&](Instruction &I) {
5001     if (BadMallocCalls.count(&I))
5002       return true;
5003 
5004     bool IsMalloc = isMallocLikeFn(&I, TLI);
5005     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5006     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5007     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5008       BadMallocCalls.insert(&I);
5009       return true;
5010     }
5011 
5012     if (IsMalloc) {
5013       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5014         if (Size->getValue().ule(MaxHeapToStackSize))
5015           if (UsesCheck(I) || FreeCheck(I)) {
5016             MallocCalls.insert(&I);
5017             return true;
5018           }
5019     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5020       // Only if the alignment and sizes are constant.
5021       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5022         if (Size->getValue().ule(MaxHeapToStackSize))
5023           if (UsesCheck(I) || FreeCheck(I)) {
5024             MallocCalls.insert(&I);
5025             return true;
5026           }
5027     } else if (IsCalloc) {
5028       bool Overflow = false;
5029       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5030         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5031           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5032                   .ule(MaxHeapToStackSize))
5033             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5034               MallocCalls.insert(&I);
5035               return true;
5036             }
5037     }
5038 
5039     BadMallocCalls.insert(&I);
5040     return true;
5041   };
5042 
5043   size_t NumBadMallocs = BadMallocCalls.size();
5044 
5045   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5046 
5047   if (NumBadMallocs != BadMallocCalls.size())
5048     return ChangeStatus::CHANGED;
5049 
5050   return ChangeStatus::UNCHANGED;
5051 }
5052 
5053 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5054   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5055       : AAHeapToStackImpl(IRP, A) {}
5056 
5057   /// See AbstractAttribute::trackStatistics().
5058   void trackStatistics() const override {
5059     STATS_DECL(
5060         MallocCalls, Function,
5061         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5062     for (auto *C : MallocCalls)
5063       if (!BadMallocCalls.count(C))
5064         ++BUILD_STAT_NAME(MallocCalls, Function);
5065   }
5066 };
5067 
5068 /// ----------------------- Privatizable Pointers ------------------------------
5069 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5070   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5071       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5072 
5073   ChangeStatus indicatePessimisticFixpoint() override {
5074     AAPrivatizablePtr::indicatePessimisticFixpoint();
5075     PrivatizableType = nullptr;
5076     return ChangeStatus::CHANGED;
5077   }
5078 
5079   /// Identify the type we can chose for a private copy of the underlying
5080   /// argument. None means it is not clear yet, nullptr means there is none.
5081   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5082 
5083   /// Return a privatizable type that encloses both T0 and T1.
5084   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5085   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5086     if (!T0.hasValue())
5087       return T1;
5088     if (!T1.hasValue())
5089       return T0;
5090     if (T0 == T1)
5091       return T0;
5092     return nullptr;
5093   }
5094 
5095   Optional<Type *> getPrivatizableType() const override {
5096     return PrivatizableType;
5097   }
5098 
5099   const std::string getAsStr() const override {
5100     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5101   }
5102 
5103 protected:
5104   Optional<Type *> PrivatizableType;
5105 };
5106 
5107 // TODO: Do this for call site arguments (probably also other values) as well.
5108 
5109 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5110   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5111       : AAPrivatizablePtrImpl(IRP, A) {}
5112 
5113   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5114   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5115     // If this is a byval argument and we know all the call sites (so we can
5116     // rewrite them), there is no need to check them explicitly.
5117     bool AllCallSitesKnown;
5118     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5119         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5120                                true, AllCallSitesKnown))
5121       return getAssociatedValue().getType()->getPointerElementType();
5122 
5123     Optional<Type *> Ty;
5124     unsigned ArgNo = getIRPosition().getArgNo();
5125 
5126     // Make sure the associated call site argument has the same type at all call
5127     // sites and it is an allocation we know is safe to privatize, for now that
5128     // means we only allow alloca instructions.
5129     // TODO: We can additionally analyze the accesses in the callee to  create
5130     //       the type from that information instead. That is a little more
5131     //       involved and will be done in a follow up patch.
5132     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5133       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5134       // Check if a coresponding argument was found or if it is one not
5135       // associated (which can happen for callback calls).
5136       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5137         return false;
5138 
5139       // Check that all call sites agree on a type.
5140       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
5141       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5142 
5143       LLVM_DEBUG({
5144         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5145         if (CSTy.hasValue() && CSTy.getValue())
5146           CSTy.getValue()->print(dbgs());
5147         else if (CSTy.hasValue())
5148           dbgs() << "<nullptr>";
5149         else
5150           dbgs() << "<none>";
5151       });
5152 
5153       Ty = combineTypes(Ty, CSTy);
5154 
5155       LLVM_DEBUG({
5156         dbgs() << " : New Type: ";
5157         if (Ty.hasValue() && Ty.getValue())
5158           Ty.getValue()->print(dbgs());
5159         else if (Ty.hasValue())
5160           dbgs() << "<nullptr>";
5161         else
5162           dbgs() << "<none>";
5163         dbgs() << "\n";
5164       });
5165 
5166       return !Ty.hasValue() || Ty.getValue();
5167     };
5168 
5169     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5170       return nullptr;
5171     return Ty;
5172   }
5173 
5174   /// See AbstractAttribute::updateImpl(...).
5175   ChangeStatus updateImpl(Attributor &A) override {
5176     PrivatizableType = identifyPrivatizableType(A);
5177     if (!PrivatizableType.hasValue())
5178       return ChangeStatus::UNCHANGED;
5179     if (!PrivatizableType.getValue())
5180       return indicatePessimisticFixpoint();
5181 
5182     // The dependence is optional so we don't give up once we give up on the
5183     // alignment.
5184     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5185                         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5186 
5187     // Avoid arguments with padding for now.
5188     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5189         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5190                                                 A.getInfoCache().getDL())) {
5191       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5192       return indicatePessimisticFixpoint();
5193     }
5194 
5195     // Verify callee and caller agree on how the promoted argument would be
5196     // passed.
5197     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5198     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5199     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5200     Function &Fn = *getIRPosition().getAnchorScope();
5201     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5202     ArgsToPromote.insert(getAssociatedArgument());
5203     const auto *TTI =
5204         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5205     if (!TTI ||
5206         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5207             Fn, *TTI, ArgsToPromote, Dummy) ||
5208         ArgsToPromote.empty()) {
5209       LLVM_DEBUG(
5210           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5211                  << Fn.getName() << "\n");
5212       return indicatePessimisticFixpoint();
5213     }
5214 
5215     // Collect the types that will replace the privatizable type in the function
5216     // signature.
5217     SmallVector<Type *, 16> ReplacementTypes;
5218     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5219 
5220     // Register a rewrite of the argument.
5221     Argument *Arg = getAssociatedArgument();
5222     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5223       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5224       return indicatePessimisticFixpoint();
5225     }
5226 
5227     unsigned ArgNo = Arg->getArgNo();
5228 
5229     // Helper to check if for the given call site the associated argument is
5230     // passed to a callback where the privatization would be different.
5231     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5232       SmallVector<const Use *, 4> CallbackUses;
5233       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5234       for (const Use *U : CallbackUses) {
5235         AbstractCallSite CBACS(U);
5236         assert(CBACS && CBACS.isCallbackCall());
5237         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5238           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5239 
5240           LLVM_DEBUG({
5241             dbgs()
5242                 << "[AAPrivatizablePtr] Argument " << *Arg
5243                 << "check if can be privatized in the context of its parent ("
5244                 << Arg->getParent()->getName()
5245                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5246                    "callback ("
5247                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5248                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5249                 << CBACS.getCallArgOperand(CBArg) << " vs "
5250                 << CB.getArgOperand(ArgNo) << "\n"
5251                 << "[AAPrivatizablePtr] " << CBArg << " : "
5252                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5253           });
5254 
5255           if (CBArgNo != int(ArgNo))
5256             continue;
5257           const auto &CBArgPrivAA =
5258               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5259           if (CBArgPrivAA.isValidState()) {
5260             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5261             if (!CBArgPrivTy.hasValue())
5262               continue;
5263             if (CBArgPrivTy.getValue() == PrivatizableType)
5264               continue;
5265           }
5266 
5267           LLVM_DEBUG({
5268             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5269                    << " cannot be privatized in the context of its parent ("
5270                    << Arg->getParent()->getName()
5271                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5272                       "callback ("
5273                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5274                    << ").\n[AAPrivatizablePtr] for which the argument "
5275                       "privatization is not compatible.\n";
5276           });
5277           return false;
5278         }
5279       }
5280       return true;
5281     };
5282 
5283     // Helper to check if for the given call site the associated argument is
5284     // passed to a direct call where the privatization would be different.
5285     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5286       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5287       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5288       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5289              "Expected a direct call operand for callback call operand");
5290 
5291       LLVM_DEBUG({
5292         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5293                << " check if be privatized in the context of its parent ("
5294                << Arg->getParent()->getName()
5295                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5296                   "direct call of ("
5297                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5298                << ").\n";
5299       });
5300 
5301       Function *DCCallee = DC->getCalledFunction();
5302       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5303         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5304             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5305         if (DCArgPrivAA.isValidState()) {
5306           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5307           if (!DCArgPrivTy.hasValue())
5308             return true;
5309           if (DCArgPrivTy.getValue() == PrivatizableType)
5310             return true;
5311         }
5312       }
5313 
5314       LLVM_DEBUG({
5315         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5316                << " cannot be privatized in the context of its parent ("
5317                << Arg->getParent()->getName()
5318                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5319                   "direct call of ("
5320                << ACS.getInstruction()->getCalledFunction()->getName()
5321                << ").\n[AAPrivatizablePtr] for which the argument "
5322                   "privatization is not compatible.\n";
5323       });
5324       return false;
5325     };
5326 
5327     // Helper to check if the associated argument is used at the given abstract
5328     // call site in a way that is incompatible with the privatization assumed
5329     // here.
5330     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5331       if (ACS.isDirectCall())
5332         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5333       if (ACS.isCallbackCall())
5334         return IsCompatiblePrivArgOfDirectCS(ACS);
5335       return false;
5336     };
5337 
5338     bool AllCallSitesKnown;
5339     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5340                                 AllCallSitesKnown))
5341       return indicatePessimisticFixpoint();
5342 
5343     return ChangeStatus::UNCHANGED;
5344   }
5345 
5346   /// Given a type to private \p PrivType, collect the constituates (which are
5347   /// used) in \p ReplacementTypes.
5348   static void
5349   identifyReplacementTypes(Type *PrivType,
5350                            SmallVectorImpl<Type *> &ReplacementTypes) {
5351     // TODO: For now we expand the privatization type to the fullest which can
5352     //       lead to dead arguments that need to be removed later.
5353     assert(PrivType && "Expected privatizable type!");
5354 
5355     // Traverse the type, extract constituate types on the outermost level.
5356     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5357       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5358         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5359     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5360       ReplacementTypes.append(PrivArrayType->getNumElements(),
5361                               PrivArrayType->getElementType());
5362     } else {
5363       ReplacementTypes.push_back(PrivType);
5364     }
5365   }
5366 
5367   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5368   /// The values needed are taken from the arguments of \p F starting at
5369   /// position \p ArgNo.
5370   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5371                                    unsigned ArgNo, Instruction &IP) {
5372     assert(PrivType && "Expected privatizable type!");
5373 
5374     IRBuilder<NoFolder> IRB(&IP);
5375     const DataLayout &DL = F.getParent()->getDataLayout();
5376 
5377     // Traverse the type, build GEPs and stores.
5378     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5379       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5380       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5381         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5382         Value *Ptr = constructPointer(
5383             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5384         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5385       }
5386     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5387       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5388       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5389       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5390         Value *Ptr =
5391             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5392         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5393       }
5394     } else {
5395       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5396     }
5397   }
5398 
5399   /// Extract values from \p Base according to the type \p PrivType at the
5400   /// call position \p ACS. The values are appended to \p ReplacementValues.
5401   void createReplacementValues(Align Alignment, Type *PrivType,
5402                                AbstractCallSite ACS, Value *Base,
5403                                SmallVectorImpl<Value *> &ReplacementValues) {
5404     assert(Base && "Expected base value!");
5405     assert(PrivType && "Expected privatizable type!");
5406     Instruction *IP = ACS.getInstruction();
5407 
5408     IRBuilder<NoFolder> IRB(IP);
5409     const DataLayout &DL = IP->getModule()->getDataLayout();
5410 
5411     if (Base->getType()->getPointerElementType() != PrivType)
5412       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5413                                                  "", ACS.getInstruction());
5414 
5415     // Traverse the type, build GEPs and loads.
5416     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5417       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5418       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5419         Type *PointeeTy = PrivStructType->getElementType(u);
5420         Value *Ptr =
5421             constructPointer(PointeeTy->getPointerTo(), Base,
5422                              PrivStructLayout->getElementOffset(u), IRB, DL);
5423         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5424         L->setAlignment(Alignment);
5425         ReplacementValues.push_back(L);
5426       }
5427     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5428       Type *PointeeTy = PrivArrayType->getElementType();
5429       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5430       Type *PointeePtrTy = PointeeTy->getPointerTo();
5431       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5432         Value *Ptr =
5433             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5434         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5435         L->setAlignment(Alignment);
5436         ReplacementValues.push_back(L);
5437       }
5438     } else {
5439       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5440       L->setAlignment(Alignment);
5441       ReplacementValues.push_back(L);
5442     }
5443   }
5444 
5445   /// See AbstractAttribute::manifest(...)
5446   ChangeStatus manifest(Attributor &A) override {
5447     if (!PrivatizableType.hasValue())
5448       return ChangeStatus::UNCHANGED;
5449     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5450 
5451     // Collect all tail calls in the function as we cannot allow new allocas to
5452     // escape into tail recursion.
5453     // TODO: Be smarter about new allocas escaping into tail calls.
5454     SmallVector<CallInst *, 16> TailCalls;
5455     if (!A.checkForAllInstructions(
5456             [&](Instruction &I) {
5457               CallInst &CI = cast<CallInst>(I);
5458               if (CI.isTailCall())
5459                 TailCalls.push_back(&CI);
5460               return true;
5461             },
5462             *this, {Instruction::Call}))
5463       return ChangeStatus::UNCHANGED;
5464 
5465     Argument *Arg = getAssociatedArgument();
5466     // Query AAAlign attribute for alignment of associated argument to
5467     // determine the best alignment of loads.
5468     const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg));
5469 
5470     // Callback to repair the associated function. A new alloca is placed at the
5471     // beginning and initialized with the values passed through arguments. The
5472     // new alloca replaces the use of the old pointer argument.
5473     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5474         [=](const Attributor::ArgumentReplacementInfo &ARI,
5475             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5476           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5477           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5478           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5479                                     Arg->getName() + ".priv", IP);
5480           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5481                                ArgIt->getArgNo(), *IP);
5482           Arg->replaceAllUsesWith(AI);
5483 
5484           for (CallInst *CI : TailCalls)
5485             CI->setTailCall(false);
5486         };
5487 
5488     // Callback to repair a call site of the associated function. The elements
5489     // of the privatizable type are loaded prior to the call and passed to the
5490     // new function version.
5491     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5492         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5493                       AbstractCallSite ACS,
5494                       SmallVectorImpl<Value *> &NewArgOperands) {
5495           // When no alignment is specified for the load instruction,
5496           // natural alignment is assumed.
5497           createReplacementValues(
5498               assumeAligned(AlignAA.getAssumedAlign()),
5499               PrivatizableType.getValue(), ACS,
5500               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5501               NewArgOperands);
5502         };
5503 
5504     // Collect the types that will replace the privatizable type in the function
5505     // signature.
5506     SmallVector<Type *, 16> ReplacementTypes;
5507     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5508 
5509     // Register a rewrite of the argument.
5510     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5511                                            std::move(FnRepairCB),
5512                                            std::move(ACSRepairCB)))
5513       return ChangeStatus::CHANGED;
5514     return ChangeStatus::UNCHANGED;
5515   }
5516 
5517   /// See AbstractAttribute::trackStatistics()
5518   void trackStatistics() const override {
5519     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5520   }
5521 };
5522 
5523 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5524   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5525       : AAPrivatizablePtrImpl(IRP, A) {}
5526 
5527   /// See AbstractAttribute::initialize(...).
5528   virtual void initialize(Attributor &A) override {
5529     // TODO: We can privatize more than arguments.
5530     indicatePessimisticFixpoint();
5531   }
5532 
5533   ChangeStatus updateImpl(Attributor &A) override {
5534     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5535                      "updateImpl will not be called");
5536   }
5537 
5538   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5539   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5540     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5541     if (!Obj) {
5542       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5543       return nullptr;
5544     }
5545 
5546     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5547       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5548         if (CI->isOne())
5549           return Obj->getType()->getPointerElementType();
5550     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5551       auto &PrivArgAA =
5552           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5553       if (PrivArgAA.isAssumedPrivatizablePtr())
5554         return Obj->getType()->getPointerElementType();
5555     }
5556 
5557     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5558                          "alloca nor privatizable argument: "
5559                       << *Obj << "!\n");
5560     return nullptr;
5561   }
5562 
5563   /// See AbstractAttribute::trackStatistics()
5564   void trackStatistics() const override {
5565     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5566   }
5567 };
5568 
5569 struct AAPrivatizablePtrCallSiteArgument final
5570     : public AAPrivatizablePtrFloating {
5571   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5572       : AAPrivatizablePtrFloating(IRP, A) {}
5573 
5574   /// See AbstractAttribute::initialize(...).
5575   void initialize(Attributor &A) override {
5576     if (getIRPosition().hasAttr(Attribute::ByVal))
5577       indicateOptimisticFixpoint();
5578   }
5579 
5580   /// See AbstractAttribute::updateImpl(...).
5581   ChangeStatus updateImpl(Attributor &A) override {
5582     PrivatizableType = identifyPrivatizableType(A);
5583     if (!PrivatizableType.hasValue())
5584       return ChangeStatus::UNCHANGED;
5585     if (!PrivatizableType.getValue())
5586       return indicatePessimisticFixpoint();
5587 
5588     const IRPosition &IRP = getIRPosition();
5589     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5590     if (!NoCaptureAA.isAssumedNoCapture()) {
5591       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5592       return indicatePessimisticFixpoint();
5593     }
5594 
5595     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5596     if (!NoAliasAA.isAssumedNoAlias()) {
5597       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5598       return indicatePessimisticFixpoint();
5599     }
5600 
5601     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5602     if (!MemBehaviorAA.isAssumedReadOnly()) {
5603       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5604       return indicatePessimisticFixpoint();
5605     }
5606 
5607     return ChangeStatus::UNCHANGED;
5608   }
5609 
5610   /// See AbstractAttribute::trackStatistics()
5611   void trackStatistics() const override {
5612     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5613   }
5614 };
5615 
5616 struct AAPrivatizablePtrCallSiteReturned final
5617     : public AAPrivatizablePtrFloating {
5618   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5619       : AAPrivatizablePtrFloating(IRP, A) {}
5620 
5621   /// See AbstractAttribute::initialize(...).
5622   void initialize(Attributor &A) override {
5623     // TODO: We can privatize more than arguments.
5624     indicatePessimisticFixpoint();
5625   }
5626 
5627   /// See AbstractAttribute::trackStatistics()
5628   void trackStatistics() const override {
5629     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5630   }
5631 };
5632 
5633 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5634   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5635       : AAPrivatizablePtrFloating(IRP, A) {}
5636 
5637   /// See AbstractAttribute::initialize(...).
5638   void initialize(Attributor &A) override {
5639     // TODO: We can privatize more than arguments.
5640     indicatePessimisticFixpoint();
5641   }
5642 
5643   /// See AbstractAttribute::trackStatistics()
5644   void trackStatistics() const override {
5645     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5646   }
5647 };
5648 
5649 /// -------------------- Memory Behavior Attributes ----------------------------
5650 /// Includes read-none, read-only, and write-only.
5651 /// ----------------------------------------------------------------------------
5652 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5653   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5654       : AAMemoryBehavior(IRP, A) {}
5655 
5656   /// See AbstractAttribute::initialize(...).
5657   void initialize(Attributor &A) override {
5658     intersectAssumedBits(BEST_STATE);
5659     getKnownStateFromValue(getIRPosition(), getState());
5660     IRAttribute::initialize(A);
5661   }
5662 
5663   /// Return the memory behavior information encoded in the IR for \p IRP.
5664   static void getKnownStateFromValue(const IRPosition &IRP,
5665                                      BitIntegerState &State,
5666                                      bool IgnoreSubsumingPositions = false) {
5667     SmallVector<Attribute, 2> Attrs;
5668     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5669     for (const Attribute &Attr : Attrs) {
5670       switch (Attr.getKindAsEnum()) {
5671       case Attribute::ReadNone:
5672         State.addKnownBits(NO_ACCESSES);
5673         break;
5674       case Attribute::ReadOnly:
5675         State.addKnownBits(NO_WRITES);
5676         break;
5677       case Attribute::WriteOnly:
5678         State.addKnownBits(NO_READS);
5679         break;
5680       default:
5681         llvm_unreachable("Unexpected attribute!");
5682       }
5683     }
5684 
5685     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5686       if (!I->mayReadFromMemory())
5687         State.addKnownBits(NO_READS);
5688       if (!I->mayWriteToMemory())
5689         State.addKnownBits(NO_WRITES);
5690     }
5691   }
5692 
5693   /// See AbstractAttribute::getDeducedAttributes(...).
5694   void getDeducedAttributes(LLVMContext &Ctx,
5695                             SmallVectorImpl<Attribute> &Attrs) const override {
5696     assert(Attrs.size() == 0);
5697     if (isAssumedReadNone())
5698       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5699     else if (isAssumedReadOnly())
5700       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5701     else if (isAssumedWriteOnly())
5702       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5703     assert(Attrs.size() <= 1);
5704   }
5705 
5706   /// See AbstractAttribute::manifest(...).
5707   ChangeStatus manifest(Attributor &A) override {
5708     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5709       return ChangeStatus::UNCHANGED;
5710 
5711     const IRPosition &IRP = getIRPosition();
5712 
5713     // Check if we would improve the existing attributes first.
5714     SmallVector<Attribute, 4> DeducedAttrs;
5715     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5716     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5717           return IRP.hasAttr(Attr.getKindAsEnum(),
5718                              /* IgnoreSubsumingPositions */ true);
5719         }))
5720       return ChangeStatus::UNCHANGED;
5721 
5722     // Clear existing attributes.
5723     IRP.removeAttrs(AttrKinds);
5724 
5725     // Use the generic manifest method.
5726     return IRAttribute::manifest(A);
5727   }
5728 
5729   /// See AbstractState::getAsStr().
5730   const std::string getAsStr() const override {
5731     if (isAssumedReadNone())
5732       return "readnone";
5733     if (isAssumedReadOnly())
5734       return "readonly";
5735     if (isAssumedWriteOnly())
5736       return "writeonly";
5737     return "may-read/write";
5738   }
5739 
5740   /// The set of IR attributes AAMemoryBehavior deals with.
5741   static const Attribute::AttrKind AttrKinds[3];
5742 };
5743 
5744 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5745     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5746 
5747 /// Memory behavior attribute for a floating value.
5748 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5749   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5750       : AAMemoryBehaviorImpl(IRP, A) {}
5751 
5752   /// See AbstractAttribute::initialize(...).
5753   void initialize(Attributor &A) override {
5754     AAMemoryBehaviorImpl::initialize(A);
5755     // Initialize the use vector with all direct uses of the associated value.
5756     for (const Use &U : getAssociatedValue().uses())
5757       Uses.insert(&U);
5758   }
5759 
5760   /// See AbstractAttribute::updateImpl(...).
5761   ChangeStatus updateImpl(Attributor &A) override;
5762 
5763   /// See AbstractAttribute::trackStatistics()
5764   void trackStatistics() const override {
5765     if (isAssumedReadNone())
5766       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5767     else if (isAssumedReadOnly())
5768       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5769     else if (isAssumedWriteOnly())
5770       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5771   }
5772 
5773 private:
5774   /// Return true if users of \p UserI might access the underlying
5775   /// variable/location described by \p U and should therefore be analyzed.
5776   bool followUsersOfUseIn(Attributor &A, const Use *U,
5777                           const Instruction *UserI);
5778 
5779   /// Update the state according to the effect of use \p U in \p UserI.
5780   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5781 
5782 protected:
5783   /// Container for (transitive) uses of the associated argument.
5784   SetVector<const Use *> Uses;
5785 };
5786 
5787 /// Memory behavior attribute for function argument.
5788 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5789   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5790       : AAMemoryBehaviorFloating(IRP, A) {}
5791 
5792   /// See AbstractAttribute::initialize(...).
5793   void initialize(Attributor &A) override {
5794     intersectAssumedBits(BEST_STATE);
5795     const IRPosition &IRP = getIRPosition();
5796     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5797     // can query it when we use has/getAttr. That would allow us to reuse the
5798     // initialize of the base class here.
5799     bool HasByVal =
5800         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5801     getKnownStateFromValue(IRP, getState(),
5802                            /* IgnoreSubsumingPositions */ HasByVal);
5803 
5804     // Initialize the use vector with all direct uses of the associated value.
5805     Argument *Arg = getAssociatedArgument();
5806     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5807       indicatePessimisticFixpoint();
5808     } else {
5809       // Initialize the use vector with all direct uses of the associated value.
5810       for (const Use &U : Arg->uses())
5811         Uses.insert(&U);
5812     }
5813   }
5814 
5815   ChangeStatus manifest(Attributor &A) override {
5816     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5817     if (!getAssociatedValue().getType()->isPointerTy())
5818       return ChangeStatus::UNCHANGED;
5819 
5820     // TODO: From readattrs.ll: "inalloca parameters are always
5821     //                           considered written"
5822     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
5823       removeKnownBits(NO_WRITES);
5824       removeAssumedBits(NO_WRITES);
5825     }
5826     return AAMemoryBehaviorFloating::manifest(A);
5827   }
5828 
5829   /// See AbstractAttribute::trackStatistics()
5830   void trackStatistics() const override {
5831     if (isAssumedReadNone())
5832       STATS_DECLTRACK_ARG_ATTR(readnone)
5833     else if (isAssumedReadOnly())
5834       STATS_DECLTRACK_ARG_ATTR(readonly)
5835     else if (isAssumedWriteOnly())
5836       STATS_DECLTRACK_ARG_ATTR(writeonly)
5837   }
5838 };
5839 
5840 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5841   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5842       : AAMemoryBehaviorArgument(IRP, A) {}
5843 
5844   /// See AbstractAttribute::initialize(...).
5845   void initialize(Attributor &A) override {
5846     if (Argument *Arg = getAssociatedArgument()) {
5847       if (Arg->hasByValAttr()) {
5848         addKnownBits(NO_WRITES);
5849         removeKnownBits(NO_READS);
5850         removeAssumedBits(NO_READS);
5851       }
5852     }
5853     AAMemoryBehaviorArgument::initialize(A);
5854   }
5855 
5856   /// See AbstractAttribute::updateImpl(...).
5857   ChangeStatus updateImpl(Attributor &A) override {
5858     // TODO: Once we have call site specific value information we can provide
5859     //       call site specific liveness liveness information and then it makes
5860     //       sense to specialize attributes for call sites arguments instead of
5861     //       redirecting requests to the callee argument.
5862     Argument *Arg = getAssociatedArgument();
5863     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5864     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5865     return clampStateAndIndicateChange(
5866         getState(),
5867         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5868   }
5869 
5870   /// See AbstractAttribute::trackStatistics()
5871   void trackStatistics() const override {
5872     if (isAssumedReadNone())
5873       STATS_DECLTRACK_CSARG_ATTR(readnone)
5874     else if (isAssumedReadOnly())
5875       STATS_DECLTRACK_CSARG_ATTR(readonly)
5876     else if (isAssumedWriteOnly())
5877       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5878   }
5879 };
5880 
5881 /// Memory behavior attribute for a call site return position.
5882 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5883   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
5884       : AAMemoryBehaviorFloating(IRP, A) {}
5885 
5886   /// See AbstractAttribute::manifest(...).
5887   ChangeStatus manifest(Attributor &A) override {
5888     // We do not annotate returned values.
5889     return ChangeStatus::UNCHANGED;
5890   }
5891 
5892   /// See AbstractAttribute::trackStatistics()
5893   void trackStatistics() const override {}
5894 };
5895 
5896 /// An AA to represent the memory behavior function attributes.
5897 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5898   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
5899       : AAMemoryBehaviorImpl(IRP, A) {}
5900 
5901   /// See AbstractAttribute::updateImpl(Attributor &A).
5902   virtual ChangeStatus updateImpl(Attributor &A) override;
5903 
5904   /// See AbstractAttribute::manifest(...).
5905   ChangeStatus manifest(Attributor &A) override {
5906     Function &F = cast<Function>(getAnchorValue());
5907     if (isAssumedReadNone()) {
5908       F.removeFnAttr(Attribute::ArgMemOnly);
5909       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5910       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5911     }
5912     return AAMemoryBehaviorImpl::manifest(A);
5913   }
5914 
5915   /// See AbstractAttribute::trackStatistics()
5916   void trackStatistics() const override {
5917     if (isAssumedReadNone())
5918       STATS_DECLTRACK_FN_ATTR(readnone)
5919     else if (isAssumedReadOnly())
5920       STATS_DECLTRACK_FN_ATTR(readonly)
5921     else if (isAssumedWriteOnly())
5922       STATS_DECLTRACK_FN_ATTR(writeonly)
5923   }
5924 };
5925 
5926 /// AAMemoryBehavior attribute for call sites.
5927 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5928   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
5929       : AAMemoryBehaviorImpl(IRP, A) {}
5930 
5931   /// See AbstractAttribute::initialize(...).
5932   void initialize(Attributor &A) override {
5933     AAMemoryBehaviorImpl::initialize(A);
5934     Function *F = getAssociatedFunction();
5935     if (!F || !A.isFunctionIPOAmendable(*F)) {
5936       indicatePessimisticFixpoint();
5937       return;
5938     }
5939   }
5940 
5941   /// See AbstractAttribute::updateImpl(...).
5942   ChangeStatus updateImpl(Attributor &A) override {
5943     // TODO: Once we have call site specific value information we can provide
5944     //       call site specific liveness liveness information and then it makes
5945     //       sense to specialize attributes for call sites arguments instead of
5946     //       redirecting requests to the callee argument.
5947     Function *F = getAssociatedFunction();
5948     const IRPosition &FnPos = IRPosition::function(*F);
5949     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5950     return clampStateAndIndicateChange(
5951         getState(),
5952         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5953   }
5954 
5955   /// See AbstractAttribute::trackStatistics()
5956   void trackStatistics() const override {
5957     if (isAssumedReadNone())
5958       STATS_DECLTRACK_CS_ATTR(readnone)
5959     else if (isAssumedReadOnly())
5960       STATS_DECLTRACK_CS_ATTR(readonly)
5961     else if (isAssumedWriteOnly())
5962       STATS_DECLTRACK_CS_ATTR(writeonly)
5963   }
5964 };
5965 
5966 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5967 
5968   // The current assumed state used to determine a change.
5969   auto AssumedState = getAssumed();
5970 
5971   auto CheckRWInst = [&](Instruction &I) {
5972     // If the instruction has an own memory behavior state, use it to restrict
5973     // the local state. No further analysis is required as the other memory
5974     // state is as optimistic as it gets.
5975     if (const auto *CB = dyn_cast<CallBase>(&I)) {
5976       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5977           *this, IRPosition::callsite_function(*CB));
5978       intersectAssumedBits(MemBehaviorAA.getAssumed());
5979       return !isAtFixpoint();
5980     }
5981 
5982     // Remove access kind modifiers if necessary.
5983     if (I.mayReadFromMemory())
5984       removeAssumedBits(NO_READS);
5985     if (I.mayWriteToMemory())
5986       removeAssumedBits(NO_WRITES);
5987     return !isAtFixpoint();
5988   };
5989 
5990   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5991     return indicatePessimisticFixpoint();
5992 
5993   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5994                                         : ChangeStatus::UNCHANGED;
5995 }
5996 
5997 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5998 
5999   const IRPosition &IRP = getIRPosition();
6000   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6001   AAMemoryBehavior::StateType &S = getState();
6002 
6003   // First, check the function scope. We take the known information and we avoid
6004   // work if the assumed information implies the current assumed information for
6005   // this attribute. This is a valid for all but byval arguments.
6006   Argument *Arg = IRP.getAssociatedArgument();
6007   AAMemoryBehavior::base_t FnMemAssumedState =
6008       AAMemoryBehavior::StateType::getWorstState();
6009   if (!Arg || !Arg->hasByValAttr()) {
6010     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
6011         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6012     FnMemAssumedState = FnMemAA.getAssumed();
6013     S.addKnownBits(FnMemAA.getKnown());
6014     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6015       return ChangeStatus::UNCHANGED;
6016   }
6017 
6018   // Make sure the value is not captured (except through "return"), if
6019   // it is, any information derived would be irrelevant anyway as we cannot
6020   // check the potential aliases introduced by the capture. However, no need
6021   // to fall back to anythign less optimistic than the function state.
6022   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6023       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6024   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6025     S.intersectAssumedBits(FnMemAssumedState);
6026     return ChangeStatus::CHANGED;
6027   }
6028 
6029   // The current assumed state used to determine a change.
6030   auto AssumedState = S.getAssumed();
6031 
6032   // Liveness information to exclude dead users.
6033   // TODO: Take the FnPos once we have call site specific liveness information.
6034   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6035       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6036       /* TrackDependence */ false);
6037 
6038   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6039   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6040     const Use *U = Uses[i];
6041     Instruction *UserI = cast<Instruction>(U->getUser());
6042     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6043                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6044                       << "]\n");
6045     if (A.isAssumedDead(*U, this, &LivenessAA))
6046       continue;
6047 
6048     // Droppable users, e.g., llvm::assume does not actually perform any action.
6049     if (UserI->isDroppable())
6050       continue;
6051 
6052     // Check if the users of UserI should also be visited.
6053     if (followUsersOfUseIn(A, U, UserI))
6054       for (const Use &UserIUse : UserI->uses())
6055         Uses.insert(&UserIUse);
6056 
6057     // If UserI might touch memory we analyze the use in detail.
6058     if (UserI->mayReadOrWriteMemory())
6059       analyzeUseIn(A, U, UserI);
6060   }
6061 
6062   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6063                                         : ChangeStatus::UNCHANGED;
6064 }
6065 
6066 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6067                                                   const Instruction *UserI) {
6068   // The loaded value is unrelated to the pointer argument, no need to
6069   // follow the users of the load.
6070   if (isa<LoadInst>(UserI))
6071     return false;
6072 
6073   // By default we follow all uses assuming UserI might leak information on U,
6074   // we have special handling for call sites operands though.
6075   const auto *CB = dyn_cast<CallBase>(UserI);
6076   if (!CB || !CB->isArgOperand(U))
6077     return true;
6078 
6079   // If the use is a call argument known not to be captured, the users of
6080   // the call do not need to be visited because they have to be unrelated to
6081   // the input. Note that this check is not trivial even though we disallow
6082   // general capturing of the underlying argument. The reason is that the
6083   // call might the argument "through return", which we allow and for which we
6084   // need to check call users.
6085   if (U->get()->getType()->isPointerTy()) {
6086     unsigned ArgNo = CB->getArgOperandNo(U);
6087     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6088         *this, IRPosition::callsite_argument(*CB, ArgNo),
6089         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6090     return !ArgNoCaptureAA.isAssumedNoCapture();
6091   }
6092 
6093   return true;
6094 }
6095 
6096 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6097                                             const Instruction *UserI) {
6098   assert(UserI->mayReadOrWriteMemory());
6099 
6100   switch (UserI->getOpcode()) {
6101   default:
6102     // TODO: Handle all atomics and other side-effect operations we know of.
6103     break;
6104   case Instruction::Load:
6105     // Loads cause the NO_READS property to disappear.
6106     removeAssumedBits(NO_READS);
6107     return;
6108 
6109   case Instruction::Store:
6110     // Stores cause the NO_WRITES property to disappear if the use is the
6111     // pointer operand. Note that we do assume that capturing was taken care of
6112     // somewhere else.
6113     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6114       removeAssumedBits(NO_WRITES);
6115     return;
6116 
6117   case Instruction::Call:
6118   case Instruction::CallBr:
6119   case Instruction::Invoke: {
6120     // For call sites we look at the argument memory behavior attribute (this
6121     // could be recursive!) in order to restrict our own state.
6122     const auto *CB = cast<CallBase>(UserI);
6123 
6124     // Give up on operand bundles.
6125     if (CB->isBundleOperand(U)) {
6126       indicatePessimisticFixpoint();
6127       return;
6128     }
6129 
6130     // Calling a function does read the function pointer, maybe write it if the
6131     // function is self-modifying.
6132     if (CB->isCallee(U)) {
6133       removeAssumedBits(NO_READS);
6134       break;
6135     }
6136 
6137     // Adjust the possible access behavior based on the information on the
6138     // argument.
6139     IRPosition Pos;
6140     if (U->get()->getType()->isPointerTy())
6141       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6142     else
6143       Pos = IRPosition::callsite_function(*CB);
6144     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6145         *this, Pos,
6146         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6147     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6148     // and at least "known".
6149     intersectAssumedBits(MemBehaviorAA.getAssumed());
6150     return;
6151   }
6152   };
6153 
6154   // Generally, look at the "may-properties" and adjust the assumed state if we
6155   // did not trigger special handling before.
6156   if (UserI->mayReadFromMemory())
6157     removeAssumedBits(NO_READS);
6158   if (UserI->mayWriteToMemory())
6159     removeAssumedBits(NO_WRITES);
6160 }
6161 
6162 } // namespace
6163 
6164 /// -------------------- Memory Locations Attributes ---------------------------
6165 /// Includes read-none, argmemonly, inaccessiblememonly,
6166 /// inaccessiblememorargmemonly
6167 /// ----------------------------------------------------------------------------
6168 
6169 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6170     AAMemoryLocation::MemoryLocationsKind MLK) {
6171   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6172     return "all memory";
6173   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6174     return "no memory";
6175   std::string S = "memory:";
6176   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6177     S += "stack,";
6178   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6179     S += "constant,";
6180   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6181     S += "internal global,";
6182   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6183     S += "external global,";
6184   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6185     S += "argument,";
6186   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6187     S += "inaccessible,";
6188   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6189     S += "malloced,";
6190   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6191     S += "unknown,";
6192   S.pop_back();
6193   return S;
6194 }
6195 
6196 namespace {
6197 struct AAMemoryLocationImpl : public AAMemoryLocation {
6198 
6199   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6200       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6201     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6202       AccessKind2Accesses[u] = nullptr;
6203   }
6204 
6205   ~AAMemoryLocationImpl() {
6206     // The AccessSets are allocated via a BumpPtrAllocator, we call
6207     // the destructor manually.
6208     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6209       if (AccessKind2Accesses[u])
6210         AccessKind2Accesses[u]->~AccessSet();
6211   }
6212 
6213   /// See AbstractAttribute::initialize(...).
6214   void initialize(Attributor &A) override {
6215     intersectAssumedBits(BEST_STATE);
6216     getKnownStateFromValue(A, getIRPosition(), getState());
6217     IRAttribute::initialize(A);
6218   }
6219 
6220   /// Return the memory behavior information encoded in the IR for \p IRP.
6221   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6222                                      BitIntegerState &State,
6223                                      bool IgnoreSubsumingPositions = false) {
6224     // For internal functions we ignore `argmemonly` and
6225     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6226     // constant propagation. It is unclear if this is the best way but it is
6227     // unlikely this will cause real performance problems. If we are deriving
6228     // attributes for the anchor function we even remove the attribute in
6229     // addition to ignoring it.
6230     bool UseArgMemOnly = true;
6231     Function *AnchorFn = IRP.getAnchorScope();
6232     if (AnchorFn && A.isRunOn(*AnchorFn))
6233       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6234 
6235     SmallVector<Attribute, 2> Attrs;
6236     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6237     for (const Attribute &Attr : Attrs) {
6238       switch (Attr.getKindAsEnum()) {
6239       case Attribute::ReadNone:
6240         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6241         break;
6242       case Attribute::InaccessibleMemOnly:
6243         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6244         break;
6245       case Attribute::ArgMemOnly:
6246         if (UseArgMemOnly)
6247           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6248         else
6249           IRP.removeAttrs({Attribute::ArgMemOnly});
6250         break;
6251       case Attribute::InaccessibleMemOrArgMemOnly:
6252         if (UseArgMemOnly)
6253           State.addKnownBits(inverseLocation(
6254               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6255         else
6256           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6257         break;
6258       default:
6259         llvm_unreachable("Unexpected attribute!");
6260       }
6261     }
6262   }
6263 
6264   /// See AbstractAttribute::getDeducedAttributes(...).
6265   void getDeducedAttributes(LLVMContext &Ctx,
6266                             SmallVectorImpl<Attribute> &Attrs) const override {
6267     assert(Attrs.size() == 0);
6268     if (isAssumedReadNone()) {
6269       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6270     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6271       if (isAssumedInaccessibleMemOnly())
6272         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6273       else if (isAssumedArgMemOnly())
6274         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6275       else if (isAssumedInaccessibleOrArgMemOnly())
6276         Attrs.push_back(
6277             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6278     }
6279     assert(Attrs.size() <= 1);
6280   }
6281 
6282   /// See AbstractAttribute::manifest(...).
6283   ChangeStatus manifest(Attributor &A) override {
6284     const IRPosition &IRP = getIRPosition();
6285 
6286     // Check if we would improve the existing attributes first.
6287     SmallVector<Attribute, 4> DeducedAttrs;
6288     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6289     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6290           return IRP.hasAttr(Attr.getKindAsEnum(),
6291                              /* IgnoreSubsumingPositions */ true);
6292         }))
6293       return ChangeStatus::UNCHANGED;
6294 
6295     // Clear existing attributes.
6296     IRP.removeAttrs(AttrKinds);
6297     if (isAssumedReadNone())
6298       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6299 
6300     // Use the generic manifest method.
6301     return IRAttribute::manifest(A);
6302   }
6303 
6304   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6305   bool checkForAllAccessesToMemoryKind(
6306       function_ref<bool(const Instruction *, const Value *, AccessKind,
6307                         MemoryLocationsKind)>
6308           Pred,
6309       MemoryLocationsKind RequestedMLK) const override {
6310     if (!isValidState())
6311       return false;
6312 
6313     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6314     if (AssumedMLK == NO_LOCATIONS)
6315       return true;
6316 
6317     unsigned Idx = 0;
6318     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6319          CurMLK *= 2, ++Idx) {
6320       if (CurMLK & RequestedMLK)
6321         continue;
6322 
6323       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6324         for (const AccessInfo &AI : *Accesses)
6325           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6326             return false;
6327     }
6328 
6329     return true;
6330   }
6331 
6332   ChangeStatus indicatePessimisticFixpoint() override {
6333     // If we give up and indicate a pessimistic fixpoint this instruction will
6334     // become an access for all potential access kinds:
6335     // TODO: Add pointers for argmemonly and globals to improve the results of
6336     //       checkForAllAccessesToMemoryKind.
6337     bool Changed = false;
6338     MemoryLocationsKind KnownMLK = getKnown();
6339     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6340     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6341       if (!(CurMLK & KnownMLK))
6342         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6343                                   getAccessKindFromInst(I));
6344     return AAMemoryLocation::indicatePessimisticFixpoint();
6345   }
6346 
6347 protected:
6348   /// Helper struct to tie together an instruction that has a read or write
6349   /// effect with the pointer it accesses (if any).
6350   struct AccessInfo {
6351 
6352     /// The instruction that caused the access.
6353     const Instruction *I;
6354 
6355     /// The base pointer that is accessed, or null if unknown.
6356     const Value *Ptr;
6357 
6358     /// The kind of access (read/write/read+write).
6359     AccessKind Kind;
6360 
6361     bool operator==(const AccessInfo &RHS) const {
6362       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6363     }
6364     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6365       if (LHS.I != RHS.I)
6366         return LHS.I < RHS.I;
6367       if (LHS.Ptr != RHS.Ptr)
6368         return LHS.Ptr < RHS.Ptr;
6369       if (LHS.Kind != RHS.Kind)
6370         return LHS.Kind < RHS.Kind;
6371       return false;
6372     }
6373   };
6374 
6375   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6376   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6377   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6378   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6379 
6380   /// Return the kind(s) of location that may be accessed by \p V.
6381   AAMemoryLocation::MemoryLocationsKind
6382   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6383 
6384   /// Return the access kind as determined by \p I.
6385   AccessKind getAccessKindFromInst(const Instruction *I) {
6386     AccessKind AK = READ_WRITE;
6387     if (I) {
6388       AK = I->mayReadFromMemory() ? READ : NONE;
6389       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6390     }
6391     return AK;
6392   }
6393 
6394   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6395   /// an access of kind \p AK to a \p MLK memory location with the access
6396   /// pointer \p Ptr.
6397   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6398                                  MemoryLocationsKind MLK, const Instruction *I,
6399                                  const Value *Ptr, bool &Changed,
6400                                  AccessKind AK = READ_WRITE) {
6401 
6402     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6403     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6404     if (!Accesses)
6405       Accesses = new (Allocator) AccessSet();
6406     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6407     State.removeAssumedBits(MLK);
6408   }
6409 
6410   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6411   /// arguments, and update the state and access map accordingly.
6412   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6413                           AAMemoryLocation::StateType &State, bool &Changed);
6414 
6415   /// Used to allocate access sets.
6416   BumpPtrAllocator &Allocator;
6417 
6418   /// The set of IR attributes AAMemoryLocation deals with.
6419   static const Attribute::AttrKind AttrKinds[4];
6420 };
6421 
6422 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6423     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6424     Attribute::InaccessibleMemOrArgMemOnly};
6425 
6426 void AAMemoryLocationImpl::categorizePtrValue(
6427     Attributor &A, const Instruction &I, const Value &Ptr,
6428     AAMemoryLocation::StateType &State, bool &Changed) {
6429   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6430                     << Ptr << " ["
6431                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6432 
6433   auto StripGEPCB = [](Value *V) -> Value * {
6434     auto *GEP = dyn_cast<GEPOperator>(V);
6435     while (GEP) {
6436       V = GEP->getPointerOperand();
6437       GEP = dyn_cast<GEPOperator>(V);
6438     }
6439     return V;
6440   };
6441 
6442   auto VisitValueCB = [&](Value &V, const Instruction *,
6443                           AAMemoryLocation::StateType &T,
6444                           bool Stripped) -> bool {
6445     MemoryLocationsKind MLK = NO_LOCATIONS;
6446     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6447     if (isa<UndefValue>(V))
6448       return true;
6449     if (auto *Arg = dyn_cast<Argument>(&V)) {
6450       if (Arg->hasByValAttr())
6451         MLK = NO_LOCAL_MEM;
6452       else
6453         MLK = NO_ARGUMENT_MEM;
6454     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6455       if (GV->hasLocalLinkage())
6456         MLK = NO_GLOBAL_INTERNAL_MEM;
6457       else
6458         MLK = NO_GLOBAL_EXTERNAL_MEM;
6459     } else if (isa<ConstantPointerNull>(V) &&
6460                !NullPointerIsDefined(getAssociatedFunction(),
6461                                      V.getType()->getPointerAddressSpace())) {
6462       return true;
6463     } else if (isa<AllocaInst>(V)) {
6464       MLK = NO_LOCAL_MEM;
6465     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6466       const auto &NoAliasAA =
6467           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6468       if (NoAliasAA.isAssumedNoAlias())
6469         MLK = NO_MALLOCED_MEM;
6470       else
6471         MLK = NO_UNKOWN_MEM;
6472     } else {
6473       MLK = NO_UNKOWN_MEM;
6474     }
6475 
6476     assert(MLK != NO_LOCATIONS && "No location specified!");
6477     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6478                               getAccessKindFromInst(&I));
6479     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6480                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6481                       << "\n");
6482     return true;
6483   };
6484 
6485   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6486           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6487           /* UseValueSimplify */ true,
6488           /* MaxValues */ 32, StripGEPCB)) {
6489     LLVM_DEBUG(
6490         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6491     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6492                               getAccessKindFromInst(&I));
6493   } else {
6494     LLVM_DEBUG(
6495         dbgs()
6496         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6497         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6498   }
6499 }
6500 
6501 AAMemoryLocation::MemoryLocationsKind
6502 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6503                                                   bool &Changed) {
6504   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6505                     << I << "\n");
6506 
6507   AAMemoryLocation::StateType AccessedLocs;
6508   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6509 
6510   if (auto *CB = dyn_cast<CallBase>(&I)) {
6511 
6512     // First check if we assume any memory is access is visible.
6513     const auto &CBMemLocationAA =
6514         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6515     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6516                       << " [" << CBMemLocationAA << "]\n");
6517 
6518     if (CBMemLocationAA.isAssumedReadNone())
6519       return NO_LOCATIONS;
6520 
6521     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6522       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6523                                 Changed, getAccessKindFromInst(&I));
6524       return AccessedLocs.getAssumed();
6525     }
6526 
6527     uint32_t CBAssumedNotAccessedLocs =
6528         CBMemLocationAA.getAssumedNotAccessedLocation();
6529 
6530     // Set the argmemonly and global bit as we handle them separately below.
6531     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6532         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6533 
6534     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6535       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6536         continue;
6537       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6538                                 getAccessKindFromInst(&I));
6539     }
6540 
6541     // Now handle global memory if it might be accessed. This is slightly tricky
6542     // as NO_GLOBAL_MEM has multiple bits set.
6543     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6544     if (HasGlobalAccesses) {
6545       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6546                             AccessKind Kind, MemoryLocationsKind MLK) {
6547         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6548                                   getAccessKindFromInst(&I));
6549         return true;
6550       };
6551       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6552               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6553         return AccessedLocs.getWorstState();
6554     }
6555 
6556     LLVM_DEBUG(
6557         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6558                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6559 
6560     // Now handle argument memory if it might be accessed.
6561     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6562     if (HasArgAccesses) {
6563       for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E;
6564            ++ArgNo) {
6565 
6566         // Skip non-pointer arguments.
6567         const Value *ArgOp = CB->getArgOperand(ArgNo);
6568         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6569           continue;
6570 
6571         // Skip readnone arguments.
6572         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
6573         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6574             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6575 
6576         if (ArgOpMemLocationAA.isAssumedReadNone())
6577           continue;
6578 
6579         // Categorize potentially accessed pointer arguments as if there was an
6580         // access instruction with them as pointer.
6581         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6582       }
6583     }
6584 
6585     LLVM_DEBUG(
6586         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6587                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6588 
6589     return AccessedLocs.getAssumed();
6590   }
6591 
6592   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6593     LLVM_DEBUG(
6594         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6595                << I << " [" << *Ptr << "]\n");
6596     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6597     return AccessedLocs.getAssumed();
6598   }
6599 
6600   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6601                     << I << "\n");
6602   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6603                             getAccessKindFromInst(&I));
6604   return AccessedLocs.getAssumed();
6605 }
6606 
6607 /// An AA to represent the memory behavior function attributes.
6608 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6609   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6610       : AAMemoryLocationImpl(IRP, A) {}
6611 
6612   /// See AbstractAttribute::updateImpl(Attributor &A).
6613   virtual ChangeStatus updateImpl(Attributor &A) override {
6614 
6615     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6616         *this, getIRPosition(), /* TrackDependence */ false);
6617     if (MemBehaviorAA.isAssumedReadNone()) {
6618       if (MemBehaviorAA.isKnownReadNone())
6619         return indicateOptimisticFixpoint();
6620       assert(isAssumedReadNone() &&
6621              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6622       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6623       return ChangeStatus::UNCHANGED;
6624     }
6625 
6626     // The current assumed state used to determine a change.
6627     auto AssumedState = getAssumed();
6628     bool Changed = false;
6629 
6630     auto CheckRWInst = [&](Instruction &I) {
6631       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6632       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6633                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6634       removeAssumedBits(inverseLocation(MLK, false, false));
6635       return true;
6636     };
6637 
6638     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6639       return indicatePessimisticFixpoint();
6640 
6641     Changed |= AssumedState != getAssumed();
6642     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6643   }
6644 
6645   /// See AbstractAttribute::trackStatistics()
6646   void trackStatistics() const override {
6647     if (isAssumedReadNone())
6648       STATS_DECLTRACK_FN_ATTR(readnone)
6649     else if (isAssumedArgMemOnly())
6650       STATS_DECLTRACK_FN_ATTR(argmemonly)
6651     else if (isAssumedInaccessibleMemOnly())
6652       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6653     else if (isAssumedInaccessibleOrArgMemOnly())
6654       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6655   }
6656 };
6657 
6658 /// AAMemoryLocation attribute for call sites.
6659 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6660   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6661       : AAMemoryLocationImpl(IRP, A) {}
6662 
6663   /// See AbstractAttribute::initialize(...).
6664   void initialize(Attributor &A) override {
6665     AAMemoryLocationImpl::initialize(A);
6666     Function *F = getAssociatedFunction();
6667     if (!F || !A.isFunctionIPOAmendable(*F)) {
6668       indicatePessimisticFixpoint();
6669       return;
6670     }
6671   }
6672 
6673   /// See AbstractAttribute::updateImpl(...).
6674   ChangeStatus updateImpl(Attributor &A) override {
6675     // TODO: Once we have call site specific value information we can provide
6676     //       call site specific liveness liveness information and then it makes
6677     //       sense to specialize attributes for call sites arguments instead of
6678     //       redirecting requests to the callee argument.
6679     Function *F = getAssociatedFunction();
6680     const IRPosition &FnPos = IRPosition::function(*F);
6681     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6682     bool Changed = false;
6683     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6684                           AccessKind Kind, MemoryLocationsKind MLK) {
6685       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6686                                 getAccessKindFromInst(I));
6687       return true;
6688     };
6689     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6690       return indicatePessimisticFixpoint();
6691     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6692   }
6693 
6694   /// See AbstractAttribute::trackStatistics()
6695   void trackStatistics() const override {
6696     if (isAssumedReadNone())
6697       STATS_DECLTRACK_CS_ATTR(readnone)
6698   }
6699 };
6700 
6701 /// ------------------ Value Constant Range Attribute -------------------------
6702 
6703 struct AAValueConstantRangeImpl : AAValueConstantRange {
6704   using StateType = IntegerRangeState;
6705   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6706       : AAValueConstantRange(IRP, A) {}
6707 
6708   /// See AbstractAttribute::getAsStr().
6709   const std::string getAsStr() const override {
6710     std::string Str;
6711     llvm::raw_string_ostream OS(Str);
6712     OS << "range(" << getBitWidth() << ")<";
6713     getKnown().print(OS);
6714     OS << " / ";
6715     getAssumed().print(OS);
6716     OS << ">";
6717     return OS.str();
6718   }
6719 
6720   /// Helper function to get a SCEV expr for the associated value at program
6721   /// point \p I.
6722   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6723     if (!getAnchorScope())
6724       return nullptr;
6725 
6726     ScalarEvolution *SE =
6727         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6728             *getAnchorScope());
6729 
6730     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6731         *getAnchorScope());
6732 
6733     if (!SE || !LI)
6734       return nullptr;
6735 
6736     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6737     if (!I)
6738       return S;
6739 
6740     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6741   }
6742 
6743   /// Helper function to get a range from SCEV for the associated value at
6744   /// program point \p I.
6745   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6746                                          const Instruction *I = nullptr) const {
6747     if (!getAnchorScope())
6748       return getWorstState(getBitWidth());
6749 
6750     ScalarEvolution *SE =
6751         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6752             *getAnchorScope());
6753 
6754     const SCEV *S = getSCEV(A, I);
6755     if (!SE || !S)
6756       return getWorstState(getBitWidth());
6757 
6758     return SE->getUnsignedRange(S);
6759   }
6760 
6761   /// Helper function to get a range from LVI for the associated value at
6762   /// program point \p I.
6763   ConstantRange
6764   getConstantRangeFromLVI(Attributor &A,
6765                           const Instruction *CtxI = nullptr) const {
6766     if (!getAnchorScope())
6767       return getWorstState(getBitWidth());
6768 
6769     LazyValueInfo *LVI =
6770         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6771             *getAnchorScope());
6772 
6773     if (!LVI || !CtxI)
6774       return getWorstState(getBitWidth());
6775     return LVI->getConstantRange(&getAssociatedValue(),
6776                                  const_cast<BasicBlock *>(CtxI->getParent()),
6777                                  const_cast<Instruction *>(CtxI));
6778   }
6779 
6780   /// See AAValueConstantRange::getKnownConstantRange(..).
6781   ConstantRange
6782   getKnownConstantRange(Attributor &A,
6783                         const Instruction *CtxI = nullptr) const override {
6784     if (!CtxI || CtxI == getCtxI())
6785       return getKnown();
6786 
6787     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6788     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6789     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6790   }
6791 
6792   /// See AAValueConstantRange::getAssumedConstantRange(..).
6793   ConstantRange
6794   getAssumedConstantRange(Attributor &A,
6795                           const Instruction *CtxI = nullptr) const override {
6796     // TODO: Make SCEV use Attributor assumption.
6797     //       We may be able to bound a variable range via assumptions in
6798     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6799     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6800 
6801     if (!CtxI || CtxI == getCtxI())
6802       return getAssumed();
6803 
6804     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6805     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6806     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6807   }
6808 
6809   /// See AbstractAttribute::initialize(..).
6810   void initialize(Attributor &A) override {
6811     // Intersect a range given by SCEV.
6812     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6813 
6814     // Intersect a range given by LVI.
6815     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6816   }
6817 
6818   /// Helper function to create MDNode for range metadata.
6819   static MDNode *
6820   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6821                             const ConstantRange &AssumedConstantRange) {
6822     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6823                                   Ty, AssumedConstantRange.getLower())),
6824                               ConstantAsMetadata::get(ConstantInt::get(
6825                                   Ty, AssumedConstantRange.getUpper()))};
6826     return MDNode::get(Ctx, LowAndHigh);
6827   }
6828 
6829   /// Return true if \p Assumed is included in \p KnownRanges.
6830   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6831 
6832     if (Assumed.isFullSet())
6833       return false;
6834 
6835     if (!KnownRanges)
6836       return true;
6837 
6838     // If multiple ranges are annotated in IR, we give up to annotate assumed
6839     // range for now.
6840 
6841     // TODO:  If there exists a known range which containts assumed range, we
6842     // can say assumed range is better.
6843     if (KnownRanges->getNumOperands() > 2)
6844       return false;
6845 
6846     ConstantInt *Lower =
6847         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6848     ConstantInt *Upper =
6849         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6850 
6851     ConstantRange Known(Lower->getValue(), Upper->getValue());
6852     return Known.contains(Assumed) && Known != Assumed;
6853   }
6854 
6855   /// Helper function to set range metadata.
6856   static bool
6857   setRangeMetadataIfisBetterRange(Instruction *I,
6858                                   const ConstantRange &AssumedConstantRange) {
6859     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6860     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6861       if (!AssumedConstantRange.isEmptySet()) {
6862         I->setMetadata(LLVMContext::MD_range,
6863                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6864                                                  AssumedConstantRange));
6865         return true;
6866       }
6867     }
6868     return false;
6869   }
6870 
6871   /// See AbstractAttribute::manifest()
6872   ChangeStatus manifest(Attributor &A) override {
6873     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6874     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6875     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6876 
6877     auto &V = getAssociatedValue();
6878     if (!AssumedConstantRange.isEmptySet() &&
6879         !AssumedConstantRange.isSingleElement()) {
6880       if (Instruction *I = dyn_cast<Instruction>(&V))
6881         if (isa<CallInst>(I) || isa<LoadInst>(I))
6882           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6883             Changed = ChangeStatus::CHANGED;
6884     }
6885 
6886     return Changed;
6887   }
6888 };
6889 
6890 struct AAValueConstantRangeArgument final
6891     : AAArgumentFromCallSiteArguments<
6892           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6893   using Base = AAArgumentFromCallSiteArguments<
6894       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
6895   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
6896       : Base(IRP, A) {}
6897 
6898   /// See AbstractAttribute::initialize(..).
6899   void initialize(Attributor &A) override {
6900     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
6901       indicatePessimisticFixpoint();
6902     } else {
6903       Base::initialize(A);
6904     }
6905   }
6906 
6907   /// See AbstractAttribute::trackStatistics()
6908   void trackStatistics() const override {
6909     STATS_DECLTRACK_ARG_ATTR(value_range)
6910   }
6911 };
6912 
6913 struct AAValueConstantRangeReturned
6914     : AAReturnedFromReturnedValues<AAValueConstantRange,
6915                                    AAValueConstantRangeImpl> {
6916   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6917                                             AAValueConstantRangeImpl>;
6918   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
6919       : Base(IRP, A) {}
6920 
6921   /// See AbstractAttribute::initialize(...).
6922   void initialize(Attributor &A) override {}
6923 
6924   /// See AbstractAttribute::trackStatistics()
6925   void trackStatistics() const override {
6926     STATS_DECLTRACK_FNRET_ATTR(value_range)
6927   }
6928 };
6929 
6930 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6931   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
6932       : AAValueConstantRangeImpl(IRP, A) {}
6933 
6934   /// See AbstractAttribute::initialize(...).
6935   void initialize(Attributor &A) override {
6936     AAValueConstantRangeImpl::initialize(A);
6937     Value &V = getAssociatedValue();
6938 
6939     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6940       unionAssumed(ConstantRange(C->getValue()));
6941       indicateOptimisticFixpoint();
6942       return;
6943     }
6944 
6945     if (isa<UndefValue>(&V)) {
6946       // Collapse the undef state to 0.
6947       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6948       indicateOptimisticFixpoint();
6949       return;
6950     }
6951 
6952     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6953       return;
6954     // If it is a load instruction with range metadata, use it.
6955     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6956       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6957         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6958         return;
6959       }
6960 
6961     // We can work with PHI and select instruction as we traverse their operands
6962     // during update.
6963     if (isa<SelectInst>(V) || isa<PHINode>(V))
6964       return;
6965 
6966     // Otherwise we give up.
6967     indicatePessimisticFixpoint();
6968 
6969     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6970                       << getAssociatedValue() << "\n");
6971   }
6972 
6973   bool calculateBinaryOperator(
6974       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6975       const Instruction *CtxI,
6976       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6977     Value *LHS = BinOp->getOperand(0);
6978     Value *RHS = BinOp->getOperand(1);
6979     // TODO: Allow non integers as well.
6980     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6981       return false;
6982 
6983     auto &LHSAA =
6984         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6985     QuerriedAAs.push_back(&LHSAA);
6986     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6987 
6988     auto &RHSAA =
6989         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6990     QuerriedAAs.push_back(&RHSAA);
6991     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6992 
6993     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
6994 
6995     T.unionAssumed(AssumedRange);
6996 
6997     // TODO: Track a known state too.
6998 
6999     return T.isValidState();
7000   }
7001 
7002   bool calculateCastInst(
7003       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7004       const Instruction *CtxI,
7005       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7006     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7007     // TODO: Allow non integers as well.
7008     Value &OpV = *CastI->getOperand(0);
7009     if (!OpV.getType()->isIntegerTy())
7010       return false;
7011 
7012     auto &OpAA =
7013         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
7014     QuerriedAAs.push_back(&OpAA);
7015     T.unionAssumed(
7016         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7017     return T.isValidState();
7018   }
7019 
7020   bool
7021   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7022                    const Instruction *CtxI,
7023                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7024     Value *LHS = CmpI->getOperand(0);
7025     Value *RHS = CmpI->getOperand(1);
7026     // TODO: Allow non integers as well.
7027     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7028       return false;
7029 
7030     auto &LHSAA =
7031         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
7032     QuerriedAAs.push_back(&LHSAA);
7033     auto &RHSAA =
7034         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
7035     QuerriedAAs.push_back(&RHSAA);
7036 
7037     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7038     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7039 
7040     // If one of them is empty set, we can't decide.
7041     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7042       return true;
7043 
7044     bool MustTrue = false, MustFalse = false;
7045 
7046     auto AllowedRegion =
7047         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7048 
7049     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
7050         CmpI->getPredicate(), RHSAARange);
7051 
7052     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7053       MustFalse = true;
7054 
7055     if (SatisfyingRegion.contains(LHSAARange))
7056       MustTrue = true;
7057 
7058     assert((!MustTrue || !MustFalse) &&
7059            "Either MustTrue or MustFalse should be false!");
7060 
7061     if (MustTrue)
7062       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7063     else if (MustFalse)
7064       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7065     else
7066       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7067 
7068     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7069                       << " " << RHSAA << "\n");
7070 
7071     // TODO: Track a known state too.
7072     return T.isValidState();
7073   }
7074 
7075   /// See AbstractAttribute::updateImpl(...).
7076   ChangeStatus updateImpl(Attributor &A) override {
7077     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7078                             IntegerRangeState &T, bool Stripped) -> bool {
7079       Instruction *I = dyn_cast<Instruction>(&V);
7080       if (!I || isa<CallBase>(I)) {
7081 
7082         // If the value is not instruction, we query AA to Attributor.
7083         const auto &AA =
7084             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
7085 
7086         // Clamp operator is not used to utilize a program point CtxI.
7087         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7088 
7089         return T.isValidState();
7090       }
7091 
7092       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7093       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7094         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7095           return false;
7096       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7097         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7098           return false;
7099       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7100         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7101           return false;
7102       } else {
7103         // Give up with other instructions.
7104         // TODO: Add other instructions
7105 
7106         T.indicatePessimisticFixpoint();
7107         return false;
7108       }
7109 
7110       // Catch circular reasoning in a pessimistic way for now.
7111       // TODO: Check how the range evolves and if we stripped anything, see also
7112       //       AADereferenceable or AAAlign for similar situations.
7113       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7114         if (QueriedAA != this)
7115           continue;
7116         // If we are in a stady state we do not need to worry.
7117         if (T.getAssumed() == getState().getAssumed())
7118           continue;
7119         T.indicatePessimisticFixpoint();
7120       }
7121 
7122       return T.isValidState();
7123     };
7124 
7125     IntegerRangeState T(getBitWidth());
7126 
7127     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7128             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7129             /* UseValueSimplify */ false))
7130       return indicatePessimisticFixpoint();
7131 
7132     return clampStateAndIndicateChange(getState(), T);
7133   }
7134 
7135   /// See AbstractAttribute::trackStatistics()
7136   void trackStatistics() const override {
7137     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7138   }
7139 };
7140 
7141 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7142   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7143       : AAValueConstantRangeImpl(IRP, A) {}
7144 
7145   /// See AbstractAttribute::initialize(...).
7146   ChangeStatus updateImpl(Attributor &A) override {
7147     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7148                      "not be called");
7149   }
7150 
7151   /// See AbstractAttribute::trackStatistics()
7152   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7153 };
7154 
7155 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7156   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7157       : AAValueConstantRangeFunction(IRP, A) {}
7158 
7159   /// See AbstractAttribute::trackStatistics()
7160   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7161 };
7162 
7163 struct AAValueConstantRangeCallSiteReturned
7164     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7165                                      AAValueConstantRangeImpl> {
7166   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7167       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7168                                        AAValueConstantRangeImpl>(IRP, A) {}
7169 
7170   /// See AbstractAttribute::initialize(...).
7171   void initialize(Attributor &A) override {
7172     // If it is a load instruction with range metadata, use the metadata.
7173     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7174       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7175         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7176 
7177     AAValueConstantRangeImpl::initialize(A);
7178   }
7179 
7180   /// See AbstractAttribute::trackStatistics()
7181   void trackStatistics() const override {
7182     STATS_DECLTRACK_CSRET_ATTR(value_range)
7183   }
7184 };
7185 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7186   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7187       : AAValueConstantRangeFloating(IRP, A) {}
7188 
7189   /// See AbstractAttribute::trackStatistics()
7190   void trackStatistics() const override {
7191     STATS_DECLTRACK_CSARG_ATTR(value_range)
7192   }
7193 };
7194 
7195 /// ------------------ Potential Values Attribute -------------------------
7196 
7197 struct AAPotentialValuesImpl : AAPotentialValues {
7198   using StateType = PotentialConstantIntValuesState;
7199 
7200   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7201       : AAPotentialValues(IRP, A) {}
7202 
7203   /// See AbstractAttribute::getAsStr().
7204   const std::string getAsStr() const override {
7205     std::string Str;
7206     llvm::raw_string_ostream OS(Str);
7207     OS << getState();
7208     return OS.str();
7209   }
7210 
7211   /// See AbstractAttribute::updateImpl(...).
7212   ChangeStatus updateImpl(Attributor &A) override {
7213     return indicatePessimisticFixpoint();
7214   }
7215 };
7216 
7217 struct AAPotentialValuesArgument final
7218     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7219                                       PotentialConstantIntValuesState> {
7220   using Base =
7221       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7222                                       PotentialConstantIntValuesState>;
7223   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7224       : Base(IRP, A) {}
7225 
7226   /// See AbstractAttribute::initialize(..).
7227   void initialize(Attributor &A) override {
7228     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7229       indicatePessimisticFixpoint();
7230     } else {
7231       Base::initialize(A);
7232     }
7233   }
7234 
7235   /// See AbstractAttribute::trackStatistics()
7236   void trackStatistics() const override {
7237     STATS_DECLTRACK_ARG_ATTR(potential_values)
7238   }
7239 };
7240 
7241 struct AAPotentialValuesReturned
7242     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7243   using Base =
7244       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7245   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7246       : Base(IRP, A) {}
7247 
7248   /// See AbstractAttribute::trackStatistics()
7249   void trackStatistics() const override {
7250     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7251   }
7252 };
7253 
7254 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7255   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7256       : AAPotentialValuesImpl(IRP, A) {}
7257 
7258   /// See AbstractAttribute::initialize(..).
7259   void initialize(Attributor &A) override {
7260     Value &V = getAssociatedValue();
7261 
7262     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7263       unionAssumed(C->getValue());
7264       indicateOptimisticFixpoint();
7265       return;
7266     }
7267 
7268     if (isa<UndefValue>(&V)) {
7269       // Collapse the undef state to 0.
7270       unionAssumed(
7271           APInt(/* numBits */ getAssociatedType()->getIntegerBitWidth(),
7272                 /* val */ 0));
7273       indicateOptimisticFixpoint();
7274       return;
7275     }
7276 
7277     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7278       return;
7279 
7280     if (isa<SelectInst>(V) || isa<PHINode>(V))
7281       return;
7282 
7283     indicatePessimisticFixpoint();
7284 
7285     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7286                       << getAssociatedValue() << "\n");
7287   }
7288 
7289   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7290                                 const APInt &RHS) {
7291     ICmpInst::Predicate Pred = ICI->getPredicate();
7292     switch (Pred) {
7293     case ICmpInst::ICMP_UGT:
7294       return LHS.ugt(RHS);
7295     case ICmpInst::ICMP_SGT:
7296       return LHS.sgt(RHS);
7297     case ICmpInst::ICMP_EQ:
7298       return LHS.eq(RHS);
7299     case ICmpInst::ICMP_UGE:
7300       return LHS.uge(RHS);
7301     case ICmpInst::ICMP_SGE:
7302       return LHS.sge(RHS);
7303     case ICmpInst::ICMP_ULT:
7304       return LHS.ult(RHS);
7305     case ICmpInst::ICMP_SLT:
7306       return LHS.slt(RHS);
7307     case ICmpInst::ICMP_NE:
7308       return LHS.ne(RHS);
7309     case ICmpInst::ICMP_ULE:
7310       return LHS.ule(RHS);
7311     case ICmpInst::ICMP_SLE:
7312       return LHS.sle(RHS);
7313     default:
7314       llvm_unreachable("Invalid ICmp predicate!");
7315     }
7316   }
7317 
7318   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7319                                  uint32_t ResultBitWidth) {
7320     Instruction::CastOps CastOp = CI->getOpcode();
7321     switch (CastOp) {
7322     default:
7323       llvm_unreachable("unsupported or not integer cast");
7324     case Instruction::Trunc:
7325       return Src.trunc(ResultBitWidth);
7326     case Instruction::SExt:
7327       return Src.sext(ResultBitWidth);
7328     case Instruction::ZExt:
7329       return Src.zext(ResultBitWidth);
7330     case Instruction::BitCast:
7331       return Src;
7332     }
7333   }
7334 
7335   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7336                                        const APInt &LHS, const APInt &RHS,
7337                                        bool &SkipOperation, bool &Unsupported) {
7338     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7339     // Unsupported is set to true when the binary operator is not supported.
7340     // SkipOperation is set to true when UB occur with the given operand pair
7341     // (LHS, RHS).
7342     // TODO: we should look at nsw and nuw keywords to handle operations
7343     //       that create poison or undef value.
7344     switch (BinOpcode) {
7345     default:
7346       Unsupported = true;
7347       return LHS;
7348     case Instruction::Add:
7349       return LHS + RHS;
7350     case Instruction::Sub:
7351       return LHS - RHS;
7352     case Instruction::Mul:
7353       return LHS * RHS;
7354     case Instruction::UDiv:
7355       if (RHS.isNullValue()) {
7356         SkipOperation = true;
7357         return LHS;
7358       }
7359       return LHS.udiv(RHS);
7360     case Instruction::SDiv:
7361       if (RHS.isNullValue()) {
7362         SkipOperation = true;
7363         return LHS;
7364       }
7365       return LHS.sdiv(RHS);
7366     case Instruction::URem:
7367       if (RHS.isNullValue()) {
7368         SkipOperation = true;
7369         return LHS;
7370       }
7371       return LHS.urem(RHS);
7372     case Instruction::SRem:
7373       if (RHS.isNullValue()) {
7374         SkipOperation = true;
7375         return LHS;
7376       }
7377       return LHS.srem(RHS);
7378     case Instruction::Shl:
7379       return LHS.shl(RHS);
7380     case Instruction::LShr:
7381       return LHS.lshr(RHS);
7382     case Instruction::AShr:
7383       return LHS.ashr(RHS);
7384     case Instruction::And:
7385       return LHS & RHS;
7386     case Instruction::Or:
7387       return LHS | RHS;
7388     case Instruction::Xor:
7389       return LHS ^ RHS;
7390     }
7391   }
7392 
7393   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7394     auto AssumedBefore = getAssumed();
7395     Value *LHS = ICI->getOperand(0);
7396     Value *RHS = ICI->getOperand(1);
7397     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7398       return indicatePessimisticFixpoint();
7399 
7400     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7401     if (!LHSAA.isValidState())
7402       return indicatePessimisticFixpoint();
7403 
7404     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7405     if (!RHSAA.isValidState())
7406       return indicatePessimisticFixpoint();
7407 
7408     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7409     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7410 
7411     // TODO: Handle undef correctly.
7412     bool MaybeTrue = false, MaybeFalse = false;
7413     for (const APInt &L : LHSAAPVS) {
7414       for (const APInt &R : RHSAAPVS) {
7415         bool CmpResult = calculateICmpInst(ICI, L, R);
7416         MaybeTrue |= CmpResult;
7417         MaybeFalse |= !CmpResult;
7418         if (MaybeTrue & MaybeFalse)
7419           return indicatePessimisticFixpoint();
7420       }
7421     }
7422     if (MaybeTrue)
7423       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7424     if (MaybeFalse)
7425       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7426     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7427                                          : ChangeStatus::CHANGED;
7428   }
7429 
7430   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7431     auto AssumedBefore = getAssumed();
7432     Value *LHS = SI->getTrueValue();
7433     Value *RHS = SI->getFalseValue();
7434     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7435       return indicatePessimisticFixpoint();
7436 
7437     // TODO: Use assumed simplified condition value
7438     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7439     if (!LHSAA.isValidState())
7440       return indicatePessimisticFixpoint();
7441 
7442     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7443     if (!RHSAA.isValidState())
7444       return indicatePessimisticFixpoint();
7445 
7446     unionAssumed(LHSAA);
7447     unionAssumed(RHSAA);
7448     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7449                                          : ChangeStatus::CHANGED;
7450   }
7451 
7452   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7453     auto AssumedBefore = getAssumed();
7454     if (!CI->isIntegerCast())
7455       return indicatePessimisticFixpoint();
7456     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7457     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7458     Value *Src = CI->getOperand(0);
7459     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src));
7460     if (!SrcAA.isValidState())
7461       return indicatePessimisticFixpoint();
7462     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7463     for (const APInt &S : SrcAAPVS) {
7464       APInt T = calculateCastInst(CI, S, ResultBitWidth);
7465       unionAssumed(T);
7466     }
7467     // TODO: Handle undef correctly.
7468     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7469                                          : ChangeStatus::CHANGED;
7470   }
7471 
7472   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7473     auto AssumedBefore = getAssumed();
7474     Value *LHS = BinOp->getOperand(0);
7475     Value *RHS = BinOp->getOperand(1);
7476     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7477       return indicatePessimisticFixpoint();
7478 
7479     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
7480     if (!LHSAA.isValidState())
7481       return indicatePessimisticFixpoint();
7482 
7483     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
7484     if (!RHSAA.isValidState())
7485       return indicatePessimisticFixpoint();
7486 
7487     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7488     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7489 
7490     // TODO: Handle undef correctly
7491     for (const APInt &L : LHSAAPVS) {
7492       for (const APInt &R : RHSAAPVS) {
7493         bool SkipOperation = false;
7494         bool Unsupported = false;
7495         APInt Result =
7496             calculateBinaryOperator(BinOp, L, R, SkipOperation, Unsupported);
7497         if (Unsupported)
7498           return indicatePessimisticFixpoint();
7499         // If SkipOperation is true, we can ignore this operand pair (L, R).
7500         if (!SkipOperation)
7501           unionAssumed(Result);
7502       }
7503     }
7504     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7505                                          : ChangeStatus::CHANGED;
7506   }
7507 
7508   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7509     auto AssumedBefore = getAssumed();
7510     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7511       Value *IncomingValue = PHI->getIncomingValue(u);
7512       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7513           *this, IRPosition::value(*IncomingValue));
7514       if (!PotentialValuesAA.isValidState())
7515         return indicatePessimisticFixpoint();
7516       unionAssumed(PotentialValuesAA.getAssumed());
7517     }
7518     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7519                                          : ChangeStatus::CHANGED;
7520   }
7521 
7522   /// See AbstractAttribute::updateImpl(...).
7523   ChangeStatus updateImpl(Attributor &A) override {
7524     Value &V = getAssociatedValue();
7525     Instruction *I = dyn_cast<Instruction>(&V);
7526 
7527     if (auto *ICI = dyn_cast<ICmpInst>(I))
7528       return updateWithICmpInst(A, ICI);
7529 
7530     if (auto *SI = dyn_cast<SelectInst>(I))
7531       return updateWithSelectInst(A, SI);
7532 
7533     if (auto *CI = dyn_cast<CastInst>(I))
7534       return updateWithCastInst(A, CI);
7535 
7536     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7537       return updateWithBinaryOperator(A, BinOp);
7538 
7539     if (auto *PHI = dyn_cast<PHINode>(I))
7540       return updateWithPHINode(A, PHI);
7541 
7542     return indicatePessimisticFixpoint();
7543   }
7544 
7545   /// See AbstractAttribute::trackStatistics()
7546   void trackStatistics() const override {
7547     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7548   }
7549 };
7550 
7551 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7552   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7553       : AAPotentialValuesImpl(IRP, A) {}
7554 
7555   /// See AbstractAttribute::initialize(...).
7556   ChangeStatus updateImpl(Attributor &A) override {
7557     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7558                      "not be called");
7559   }
7560 
7561   /// See AbstractAttribute::trackStatistics()
7562   void trackStatistics() const override {
7563     STATS_DECLTRACK_FN_ATTR(potential_values)
7564   }
7565 };
7566 
7567 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7568   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7569       : AAPotentialValuesFunction(IRP, A) {}
7570 
7571   /// See AbstractAttribute::trackStatistics()
7572   void trackStatistics() const override {
7573     STATS_DECLTRACK_CS_ATTR(potential_values)
7574   }
7575 };
7576 
7577 struct AAPotentialValuesCallSiteReturned
7578     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7579   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7580       : AACallSiteReturnedFromReturned<AAPotentialValues,
7581                                        AAPotentialValuesImpl>(IRP, A) {}
7582 
7583   /// See AbstractAttribute::trackStatistics()
7584   void trackStatistics() const override {
7585     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7586   }
7587 };
7588 
7589 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7590   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7591       : AAPotentialValuesFloating(IRP, A) {}
7592 
7593   /// See AbstractAttribute::initialize(..).
7594   void initialize(Attributor &A) override {
7595     Value &V = getAssociatedValue();
7596 
7597     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7598       unionAssumed(C->getValue());
7599       indicateOptimisticFixpoint();
7600       return;
7601     }
7602 
7603     if (isa<UndefValue>(&V)) {
7604       // Collapse the undef state to 0.
7605       unionAssumed(
7606           APInt(/* numBits */ getAssociatedType()->getIntegerBitWidth(),
7607                 /* val */ 0));
7608       indicateOptimisticFixpoint();
7609       return;
7610     }
7611   }
7612 
7613   /// See AbstractAttribute::updateImpl(...).
7614   ChangeStatus updateImpl(Attributor &A) override {
7615     Value &V = getAssociatedValue();
7616     auto AssumedBefore = getAssumed();
7617     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V));
7618     const auto &S = AA.getAssumed();
7619     unionAssumed(S);
7620     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7621                                          : ChangeStatus::CHANGED;
7622   }
7623 
7624   /// See AbstractAttribute::trackStatistics()
7625   void trackStatistics() const override {
7626     STATS_DECLTRACK_CSARG_ATTR(potential_values)
7627   }
7628 };
7629 
7630 } // namespace
7631 
7632 const char AAReturnedValues::ID = 0;
7633 const char AANoUnwind::ID = 0;
7634 const char AANoSync::ID = 0;
7635 const char AANoFree::ID = 0;
7636 const char AANonNull::ID = 0;
7637 const char AANoRecurse::ID = 0;
7638 const char AAWillReturn::ID = 0;
7639 const char AAUndefinedBehavior::ID = 0;
7640 const char AANoAlias::ID = 0;
7641 const char AAReachability::ID = 0;
7642 const char AANoReturn::ID = 0;
7643 const char AAIsDead::ID = 0;
7644 const char AADereferenceable::ID = 0;
7645 const char AAAlign::ID = 0;
7646 const char AANoCapture::ID = 0;
7647 const char AAValueSimplify::ID = 0;
7648 const char AAHeapToStack::ID = 0;
7649 const char AAPrivatizablePtr::ID = 0;
7650 const char AAMemoryBehavior::ID = 0;
7651 const char AAMemoryLocation::ID = 0;
7652 const char AAValueConstantRange::ID = 0;
7653 const char AAPotentialValues::ID = 0;
7654 
7655 // Macro magic to create the static generator function for attributes that
7656 // follow the naming scheme.
7657 
7658 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
7659   case IRPosition::PK:                                                         \
7660     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
7661 
7662 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
7663   case IRPosition::PK:                                                         \
7664     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
7665     ++NumAAs;                                                                  \
7666     break;
7667 
7668 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
7669   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7670     CLASS *AA = nullptr;                                                       \
7671     switch (IRP.getPositionKind()) {                                           \
7672       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7673       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7674       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7675       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7676       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7677       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7678       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7679       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7680     }                                                                          \
7681     return *AA;                                                                \
7682   }
7683 
7684 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
7685   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7686     CLASS *AA = nullptr;                                                       \
7687     switch (IRP.getPositionKind()) {                                           \
7688       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7689       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
7690       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7691       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7692       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7693       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7694       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7695       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7696     }                                                                          \
7697     return *AA;                                                                \
7698   }
7699 
7700 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
7701   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7702     CLASS *AA = nullptr;                                                       \
7703     switch (IRP.getPositionKind()) {                                           \
7704       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7705       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7706       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7707       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7708       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7709       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7710       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7711       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7712     }                                                                          \
7713     return *AA;                                                                \
7714   }
7715 
7716 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
7717   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7718     CLASS *AA = nullptr;                                                       \
7719     switch (IRP.getPositionKind()) {                                           \
7720       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7721       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7722       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7723       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7724       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7725       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7726       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7727       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7728     }                                                                          \
7729     return *AA;                                                                \
7730   }
7731 
7732 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
7733   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7734     CLASS *AA = nullptr;                                                       \
7735     switch (IRP.getPositionKind()) {                                           \
7736       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7737       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7738       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7739       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7740       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7741       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7742       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7743       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7744     }                                                                          \
7745     return *AA;                                                                \
7746   }
7747 
7748 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
7749 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
7750 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
7751 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
7752 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
7753 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
7754 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
7755 
7756 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
7757 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7758 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
7759 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7760 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7761 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7762 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7763 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
7764 
7765 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7766 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7767 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7768 
7769 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7770 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7771 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7772 
7773 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7774 
7775 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7776 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7777 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7778 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7779 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7780 #undef SWITCH_PK_CREATE
7781 #undef SWITCH_PK_INV
7782