1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AssumeBundleQueries.h"
20 #include "llvm/Analysis/AssumptionCache.h"
21 #include "llvm/Analysis/CaptureTracking.h"
22 #include "llvm/Analysis/LazyValueInfo.h"
23 #include "llvm/Analysis/MemoryBuiltins.h"
24 #include "llvm/Analysis/ScalarEvolution.h"
25 #include "llvm/Analysis/TargetTransformInfo.h"
26 #include "llvm/Analysis/ValueTracking.h"
27 #include "llvm/IR/IRBuilder.h"
28 #include "llvm/IR/IntrinsicInst.h"
29 #include "llvm/IR/NoFolder.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
32 #include "llvm/Transforms/Utils/Local.h"
33 
34 #include <cassert>
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "attributor"
39 
40 static cl::opt<bool> ManifestInternal(
41     "attributor-manifest-internal", cl::Hidden,
42     cl::desc("Manifest Attributor internal string attributes."),
43     cl::init(false));
44 
45 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
46                                        cl::Hidden);
47 
48 template <>
49 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
50 
51 static cl::opt<unsigned, true> MaxPotentialValues(
52     "attributor-max-potential-values", cl::Hidden,
53     cl::desc("Maximum number of potential values to be "
54              "tracked for each position."),
55     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
56     cl::init(7));
57 
58 STATISTIC(NumAAs, "Number of abstract attributes created");
59 
60 // Some helper macros to deal with statistics tracking.
61 //
62 // Usage:
63 // For simple IR attribute tracking overload trackStatistics in the abstract
64 // attribute and choose the right STATS_DECLTRACK_********* macro,
65 // e.g.,:
66 //  void trackStatistics() const override {
67 //    STATS_DECLTRACK_ARG_ATTR(returned)
68 //  }
69 // If there is a single "increment" side one can use the macro
70 // STATS_DECLTRACK with a custom message. If there are multiple increment
71 // sides, STATS_DECL and STATS_TRACK can also be used separately.
72 //
73 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
74   ("Number of " #TYPE " marked '" #NAME "'")
75 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
76 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
77 #define STATS_DECL(NAME, TYPE, MSG)                                            \
78   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
79 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
80 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
81   {                                                                            \
82     STATS_DECL(NAME, TYPE, MSG)                                                \
83     STATS_TRACK(NAME, TYPE)                                                    \
84   }
85 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
86   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
87 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
88   STATS_DECLTRACK(NAME, CSArguments,                                           \
89                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
90 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
91   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
92 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
94 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
95   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
96                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
97 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
98   STATS_DECLTRACK(NAME, CSReturn,                                              \
99                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
100 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
101   STATS_DECLTRACK(NAME, Floating,                                              \
102                   ("Number of floating values known to be '" #NAME "'"))
103 
104 // Specialization of the operator<< for abstract attributes subclasses. This
105 // disambiguates situations where multiple operators are applicable.
106 namespace llvm {
107 #define PIPE_OPERATOR(CLASS)                                                   \
108   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
109     return OS << static_cast<const AbstractAttribute &>(AA);                   \
110   }
111 
112 PIPE_OPERATOR(AAIsDead)
113 PIPE_OPERATOR(AANoUnwind)
114 PIPE_OPERATOR(AANoSync)
115 PIPE_OPERATOR(AANoRecurse)
116 PIPE_OPERATOR(AAWillReturn)
117 PIPE_OPERATOR(AANoReturn)
118 PIPE_OPERATOR(AAReturnedValues)
119 PIPE_OPERATOR(AANonNull)
120 PIPE_OPERATOR(AANoAlias)
121 PIPE_OPERATOR(AADereferenceable)
122 PIPE_OPERATOR(AAAlign)
123 PIPE_OPERATOR(AANoCapture)
124 PIPE_OPERATOR(AAValueSimplify)
125 PIPE_OPERATOR(AANoFree)
126 PIPE_OPERATOR(AAHeapToStack)
127 PIPE_OPERATOR(AAReachability)
128 PIPE_OPERATOR(AAMemoryBehavior)
129 PIPE_OPERATOR(AAMemoryLocation)
130 PIPE_OPERATOR(AAValueConstantRange)
131 PIPE_OPERATOR(AAPrivatizablePtr)
132 PIPE_OPERATOR(AAUndefinedBehavior)
133 PIPE_OPERATOR(AAPotentialValues)
134 
135 #undef PIPE_OPERATOR
136 } // namespace llvm
137 
138 namespace {
139 
140 static Optional<ConstantInt *>
141 getAssumedConstantInt(Attributor &A, const Value &V,
142                       const AbstractAttribute &AA,
143                       bool &UsedAssumedInformation) {
144   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
145   if (C.hasValue())
146     return dyn_cast_or_null<ConstantInt>(C.getValue());
147   return llvm::None;
148 }
149 
150 /// Get pointer operand of memory accessing instruction. If \p I is
151 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
152 /// is set to false and the instruction is volatile, return nullptr.
153 static const Value *getPointerOperand(const Instruction *I,
154                                       bool AllowVolatile) {
155   if (auto *LI = dyn_cast<LoadInst>(I)) {
156     if (!AllowVolatile && LI->isVolatile())
157       return nullptr;
158     return LI->getPointerOperand();
159   }
160 
161   if (auto *SI = dyn_cast<StoreInst>(I)) {
162     if (!AllowVolatile && SI->isVolatile())
163       return nullptr;
164     return SI->getPointerOperand();
165   }
166 
167   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
168     if (!AllowVolatile && CXI->isVolatile())
169       return nullptr;
170     return CXI->getPointerOperand();
171   }
172 
173   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
174     if (!AllowVolatile && RMWI->isVolatile())
175       return nullptr;
176     return RMWI->getPointerOperand();
177   }
178 
179   return nullptr;
180 }
181 
182 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
183 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
184 /// getelement pointer instructions that traverse the natural type of \p Ptr if
185 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
186 /// through a cast to i8*.
187 ///
188 /// TODO: This could probably live somewhere more prominantly if it doesn't
189 ///       already exist.
190 static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
191                                IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
192   assert(Offset >= 0 && "Negative offset not supported yet!");
193   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
194                     << "-bytes as " << *ResTy << "\n");
195 
196   // The initial type we are trying to traverse to get nice GEPs.
197   Type *Ty = Ptr->getType();
198 
199   SmallVector<Value *, 4> Indices;
200   std::string GEPName = Ptr->getName().str();
201   while (Offset) {
202     uint64_t Idx, Rem;
203 
204     if (auto *STy = dyn_cast<StructType>(Ty)) {
205       const StructLayout *SL = DL.getStructLayout(STy);
206       if (int64_t(SL->getSizeInBytes()) < Offset)
207         break;
208       Idx = SL->getElementContainingOffset(Offset);
209       assert(Idx < STy->getNumElements() && "Offset calculation error!");
210       Rem = Offset - SL->getElementOffset(Idx);
211       Ty = STy->getElementType(Idx);
212     } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
213       Ty = PTy->getElementType();
214       if (!Ty->isSized())
215         break;
216       uint64_t ElementSize = DL.getTypeAllocSize(Ty);
217       assert(ElementSize && "Expected type with size!");
218       Idx = Offset / ElementSize;
219       Rem = Offset % ElementSize;
220     } else {
221       // Non-aggregate type, we cast and make byte-wise progress now.
222       break;
223     }
224 
225     LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
226                       << " Idx: " << Idx << " Rem: " << Rem << "\n");
227 
228     GEPName += "." + std::to_string(Idx);
229     Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
230     Offset = Rem;
231   }
232 
233   // Create a GEP if we collected indices above.
234   if (Indices.size())
235     Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);
236 
237   // If an offset is left we use byte-wise adjustment.
238   if (Offset) {
239     Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
240     Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
241                         GEPName + ".b" + Twine(Offset));
242   }
243 
244   // Ensure the result has the requested type.
245   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
246 
247   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
248   return Ptr;
249 }
250 
251 /// Recursively visit all values that might become \p IRP at some point. This
252 /// will be done by looking through cast instructions, selects, phis, and calls
253 /// with the "returned" attribute. Once we cannot look through the value any
254 /// further, the callback \p VisitValueCB is invoked and passed the current
255 /// value, the \p State, and a flag to indicate if we stripped anything.
256 /// Stripped means that we unpacked the value associated with \p IRP at least
257 /// once. Note that the value used for the callback may still be the value
258 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
259 /// we will never visit more values than specified by \p MaxValues.
260 template <typename AAType, typename StateTy>
261 static bool genericValueTraversal(
262     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
263     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
264         VisitValueCB,
265     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
266     function_ref<Value *(Value *)> StripCB = nullptr) {
267 
268   const AAIsDead *LivenessAA = nullptr;
269   if (IRP.getAnchorScope())
270     LivenessAA = &A.getAAFor<AAIsDead>(
271         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
272         /* TrackDependence */ false);
273   bool AnyDead = false;
274 
275   using Item = std::pair<Value *, const Instruction *>;
276   SmallSet<Item, 16> Visited;
277   SmallVector<Item, 16> Worklist;
278   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
279 
280   int Iteration = 0;
281   do {
282     Item I = Worklist.pop_back_val();
283     Value *V = I.first;
284     CtxI = I.second;
285     if (StripCB)
286       V = StripCB(V);
287 
288     // Check if we should process the current value. To prevent endless
289     // recursion keep a record of the values we followed!
290     if (!Visited.insert(I).second)
291       continue;
292 
293     // Make sure we limit the compile time for complex expressions.
294     if (Iteration++ >= MaxValues)
295       return false;
296 
297     // Explicitly look through calls with a "returned" attribute if we do
298     // not have a pointer as stripPointerCasts only works on them.
299     Value *NewV = nullptr;
300     if (V->getType()->isPointerTy()) {
301       NewV = V->stripPointerCasts();
302     } else {
303       auto *CB = dyn_cast<CallBase>(V);
304       if (CB && CB->getCalledFunction()) {
305         for (Argument &Arg : CB->getCalledFunction()->args())
306           if (Arg.hasReturnedAttr()) {
307             NewV = CB->getArgOperand(Arg.getArgNo());
308             break;
309           }
310       }
311     }
312     if (NewV && NewV != V) {
313       Worklist.push_back({NewV, CtxI});
314       continue;
315     }
316 
317     // Look through select instructions, visit both potential values.
318     if (auto *SI = dyn_cast<SelectInst>(V)) {
319       Worklist.push_back({SI->getTrueValue(), CtxI});
320       Worklist.push_back({SI->getFalseValue(), CtxI});
321       continue;
322     }
323 
324     // Look through phi nodes, visit all live operands.
325     if (auto *PHI = dyn_cast<PHINode>(V)) {
326       assert(LivenessAA &&
327              "Expected liveness in the presence of instructions!");
328       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
329         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
330         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
331                             LivenessAA,
332                             /* CheckBBLivenessOnly */ true)) {
333           AnyDead = true;
334           continue;
335         }
336         Worklist.push_back(
337             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
338       }
339       continue;
340     }
341 
342     if (UseValueSimplify && !isa<Constant>(V)) {
343       bool UsedAssumedInformation = false;
344       Optional<Constant *> C =
345           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
346       if (!C.hasValue())
347         continue;
348       if (Value *NewV = C.getValue()) {
349         Worklist.push_back({NewV, CtxI});
350         continue;
351       }
352     }
353 
354     // Once a leaf is reached we inform the user through the callback.
355     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
356       return false;
357   } while (!Worklist.empty());
358 
359   // If we actually used liveness information so we have to record a dependence.
360   if (AnyDead)
361     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
362 
363   // All values have been visited.
364   return true;
365 }
366 
367 const Value *stripAndAccumulateMinimalOffsets(
368     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
369     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
370     bool UseAssumed = false) {
371 
372   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
373     const IRPosition &Pos = IRPosition::value(V);
374     // Only track dependence if we are going to use the assumed info.
375     const AAValueConstantRange &ValueConstantRangeAA =
376         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
377                                          /* TrackDependence */ UseAssumed);
378     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
379                                      : ValueConstantRangeAA.getKnown();
380     // We can only use the lower part of the range because the upper part can
381     // be higher than what the value can really be.
382     ROffset = Range.getSignedMin();
383     return true;
384   };
385 
386   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
387                                                 AttributorAnalysis);
388 }
389 
390 static const Value *getMinimalBaseOfAccsesPointerOperand(
391     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
392     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
393   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
394   if (!Ptr)
395     return nullptr;
396   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
397   const Value *Base = stripAndAccumulateMinimalOffsets(
398       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
399 
400   BytesOffset = OffsetAPInt.getSExtValue();
401   return Base;
402 }
403 
404 static const Value *
405 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
406                                      const DataLayout &DL,
407                                      bool AllowNonInbounds = false) {
408   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
409   if (!Ptr)
410     return nullptr;
411 
412   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
413                                           AllowNonInbounds);
414 }
415 
416 /// Helper function to clamp a state \p S of type \p StateType with the
417 /// information in \p R and indicate/return if \p S did change (as-in update is
418 /// required to be run again).
419 template <typename StateType>
420 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
421   auto Assumed = S.getAssumed();
422   S ^= R;
423   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
424                                    : ChangeStatus::CHANGED;
425 }
426 
427 /// Clamp the information known for all returned values of a function
428 /// (identified by \p QueryingAA) into \p S.
429 template <typename AAType, typename StateType = typename AAType::StateType>
430 static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
431                                      StateType &S) {
432   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
433                     << QueryingAA << " into " << S << "\n");
434 
435   assert((QueryingAA.getIRPosition().getPositionKind() ==
436               IRPosition::IRP_RETURNED ||
437           QueryingAA.getIRPosition().getPositionKind() ==
438               IRPosition::IRP_CALL_SITE_RETURNED) &&
439          "Can only clamp returned value states for a function returned or call "
440          "site returned position!");
441 
442   // Use an optional state as there might not be any return values and we want
443   // to join (IntegerState::operator&) the state of all there are.
444   Optional<StateType> T;
445 
446   // Callback for each possibly returned value.
447   auto CheckReturnValue = [&](Value &RV) -> bool {
448     const IRPosition &RVPos = IRPosition::value(RV);
449     const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
450     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
451                       << " @ " << RVPos << "\n");
452     const StateType &AAS = static_cast<const StateType &>(AA.getState());
453     if (T.hasValue())
454       *T &= AAS;
455     else
456       T = AAS;
457     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
458                       << "\n");
459     return T->isValidState();
460   };
461 
462   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
463     S.indicatePessimisticFixpoint();
464   else if (T.hasValue())
465     S ^= *T;
466 }
467 
468 /// Helper class for generic deduction: return value -> returned position.
469 template <typename AAType, typename BaseType,
470           typename StateType = typename BaseType::StateType>
471 struct AAReturnedFromReturnedValues : public BaseType {
472   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
473       : BaseType(IRP, A) {}
474 
475   /// See AbstractAttribute::updateImpl(...).
476   ChangeStatus updateImpl(Attributor &A) override {
477     StateType S(StateType::getBestState(this->getState()));
478     clampReturnedValueStates<AAType, StateType>(A, *this, S);
479     // TODO: If we know we visited all returned values, thus no are assumed
480     // dead, we can take the known information from the state T.
481     return clampStateAndIndicateChange<StateType>(this->getState(), S);
482   }
483 };
484 
485 /// Clamp the information known at all call sites for a given argument
486 /// (identified by \p QueryingAA) into \p S.
487 template <typename AAType, typename StateType = typename AAType::StateType>
488 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
489                                         StateType &S) {
490   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
491                     << QueryingAA << " into " << S << "\n");
492 
493   assert(QueryingAA.getIRPosition().getPositionKind() ==
494              IRPosition::IRP_ARGUMENT &&
495          "Can only clamp call site argument states for an argument position!");
496 
497   // Use an optional state as there might not be any return values and we want
498   // to join (IntegerState::operator&) the state of all there are.
499   Optional<StateType> T;
500 
501   // The argument number which is also the call site argument number.
502   unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();
503 
504   auto CallSiteCheck = [&](AbstractCallSite ACS) {
505     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
506     // Check if a coresponding argument was found or if it is on not associated
507     // (which can happen for callback calls).
508     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
509       return false;
510 
511     const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
512     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
513                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
514     const StateType &AAS = static_cast<const StateType &>(AA.getState());
515     if (T.hasValue())
516       *T &= AAS;
517     else
518       T = AAS;
519     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
520                       << "\n");
521     return T->isValidState();
522   };
523 
524   bool AllCallSitesKnown;
525   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
526                               AllCallSitesKnown))
527     S.indicatePessimisticFixpoint();
528   else if (T.hasValue())
529     S ^= *T;
530 }
531 
532 /// Helper class for generic deduction: call site argument -> argument position.
533 template <typename AAType, typename BaseType,
534           typename StateType = typename AAType::StateType>
535 struct AAArgumentFromCallSiteArguments : public BaseType {
536   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
537       : BaseType(IRP, A) {}
538 
539   /// See AbstractAttribute::updateImpl(...).
540   ChangeStatus updateImpl(Attributor &A) override {
541     StateType S(StateType::getBestState(this->getState()));
542     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
543     // TODO: If we know we visited all incoming values, thus no are assumed
544     // dead, we can take the known information from the state T.
545     return clampStateAndIndicateChange<StateType>(this->getState(), S);
546   }
547 };
548 
549 /// Helper class for generic replication: function returned -> cs returned.
550 template <typename AAType, typename BaseType,
551           typename StateType = typename BaseType::StateType>
552 struct AACallSiteReturnedFromReturned : public BaseType {
553   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
554       : BaseType(IRP, A) {}
555 
556   /// See AbstractAttribute::updateImpl(...).
557   ChangeStatus updateImpl(Attributor &A) override {
558     assert(this->getIRPosition().getPositionKind() ==
559                IRPosition::IRP_CALL_SITE_RETURNED &&
560            "Can only wrap function returned positions for call site returned "
561            "positions!");
562     auto &S = this->getState();
563 
564     const Function *AssociatedFunction =
565         this->getIRPosition().getAssociatedFunction();
566     if (!AssociatedFunction)
567       return S.indicatePessimisticFixpoint();
568 
569     IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
570     const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
571     return clampStateAndIndicateChange(
572         S, static_cast<const StateType &>(AA.getState()));
573   }
574 };
575 
576 /// Helper function to accumulate uses.
577 template <class AAType, typename StateType = typename AAType::StateType>
578 static void followUsesInContext(AAType &AA, Attributor &A,
579                                 MustBeExecutedContextExplorer &Explorer,
580                                 const Instruction *CtxI,
581                                 SetVector<const Use *> &Uses,
582                                 StateType &State) {
583   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
584   for (unsigned u = 0; u < Uses.size(); ++u) {
585     const Use *U = Uses[u];
586     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
587       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
588       if (Found && AA.followUseInMBEC(A, U, UserI, State))
589         for (const Use &Us : UserI->uses())
590           Uses.insert(&Us);
591     }
592   }
593 }
594 
595 /// Use the must-be-executed-context around \p I to add information into \p S.
596 /// The AAType class is required to have `followUseInMBEC` method with the
597 /// following signature and behaviour:
598 ///
599 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
600 /// U - Underlying use.
601 /// I - The user of the \p U.
602 /// Returns true if the value should be tracked transitively.
603 ///
604 template <class AAType, typename StateType = typename AAType::StateType>
605 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
606                              Instruction &CtxI) {
607 
608   // Container for (transitive) uses of the associated value.
609   SetVector<const Use *> Uses;
610   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
611     Uses.insert(&U);
612 
613   MustBeExecutedContextExplorer &Explorer =
614       A.getInfoCache().getMustBeExecutedContextExplorer();
615 
616   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
617 
618   if (S.isAtFixpoint())
619     return;
620 
621   SmallVector<const BranchInst *, 4> BrInsts;
622   auto Pred = [&](const Instruction *I) {
623     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
624       if (Br->isConditional())
625         BrInsts.push_back(Br);
626     return true;
627   };
628 
629   // Here, accumulate conditional branch instructions in the context. We
630   // explore the child paths and collect the known states. The disjunction of
631   // those states can be merged to its own state. Let ParentState_i be a state
632   // to indicate the known information for an i-th branch instruction in the
633   // context. ChildStates are created for its successors respectively.
634   //
635   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
636   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
637   //      ...
638   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
639   //
640   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
641   //
642   // FIXME: Currently, recursive branches are not handled. For example, we
643   // can't deduce that ptr must be dereferenced in below function.
644   //
645   // void f(int a, int c, int *ptr) {
646   //    if(a)
647   //      if (b) {
648   //        *ptr = 0;
649   //      } else {
650   //        *ptr = 1;
651   //      }
652   //    else {
653   //      if (b) {
654   //        *ptr = 0;
655   //      } else {
656   //        *ptr = 1;
657   //      }
658   //    }
659   // }
660 
661   Explorer.checkForAllContext(&CtxI, Pred);
662   for (const BranchInst *Br : BrInsts) {
663     StateType ParentState;
664 
665     // The known state of the parent state is a conjunction of children's
666     // known states so it is initialized with a best state.
667     ParentState.indicateOptimisticFixpoint();
668 
669     for (const BasicBlock *BB : Br->successors()) {
670       StateType ChildState;
671 
672       size_t BeforeSize = Uses.size();
673       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
674 
675       // Erase uses which only appear in the child.
676       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
677         It = Uses.erase(It);
678 
679       ParentState &= ChildState;
680     }
681 
682     // Use only known state.
683     S += ParentState;
684   }
685 }
686 
687 /// -----------------------NoUnwind Function Attribute--------------------------
688 
689 struct AANoUnwindImpl : AANoUnwind {
690   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
691 
692   const std::string getAsStr() const override {
693     return getAssumed() ? "nounwind" : "may-unwind";
694   }
695 
696   /// See AbstractAttribute::updateImpl(...).
697   ChangeStatus updateImpl(Attributor &A) override {
698     auto Opcodes = {
699         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
700         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
701         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
702 
703     auto CheckForNoUnwind = [&](Instruction &I) {
704       if (!I.mayThrow())
705         return true;
706 
707       if (const auto *CB = dyn_cast<CallBase>(&I)) {
708         const auto &NoUnwindAA =
709             A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
710         return NoUnwindAA.isAssumedNoUnwind();
711       }
712       return false;
713     };
714 
715     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
716       return indicatePessimisticFixpoint();
717 
718     return ChangeStatus::UNCHANGED;
719   }
720 };
721 
722 struct AANoUnwindFunction final : public AANoUnwindImpl {
723   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
724       : AANoUnwindImpl(IRP, A) {}
725 
726   /// See AbstractAttribute::trackStatistics()
727   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
728 };
729 
730 /// NoUnwind attribute deduction for a call sites.
731 struct AANoUnwindCallSite final : AANoUnwindImpl {
732   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
733       : AANoUnwindImpl(IRP, A) {}
734 
735   /// See AbstractAttribute::initialize(...).
736   void initialize(Attributor &A) override {
737     AANoUnwindImpl::initialize(A);
738     Function *F = getAssociatedFunction();
739     if (!F)
740       indicatePessimisticFixpoint();
741   }
742 
743   /// See AbstractAttribute::updateImpl(...).
744   ChangeStatus updateImpl(Attributor &A) override {
745     // TODO: Once we have call site specific value information we can provide
746     //       call site specific liveness information and then it makes
747     //       sense to specialize attributes for call sites arguments instead of
748     //       redirecting requests to the callee argument.
749     Function *F = getAssociatedFunction();
750     const IRPosition &FnPos = IRPosition::function(*F);
751     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
752     return clampStateAndIndicateChange(
753         getState(),
754         static_cast<const AANoUnwind::StateType &>(FnAA.getState()));
755   }
756 
757   /// See AbstractAttribute::trackStatistics()
758   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
759 };
760 
761 /// --------------------- Function Return Values -------------------------------
762 
763 /// "Attribute" that collects all potential returned values and the return
764 /// instructions that they arise from.
765 ///
766 /// If there is a unique returned value R, the manifest method will:
767 ///   - mark R with the "returned" attribute, if R is an argument.
768 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
769 
770   /// Mapping of values potentially returned by the associated function to the
771   /// return instructions that might return them.
772   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
773 
774   /// Mapping to remember the number of returned values for a call site such
775   /// that we can avoid updates if nothing changed.
776   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
777 
778   /// Set of unresolved calls returned by the associated function.
779   SmallSetVector<CallBase *, 4> UnresolvedCalls;
780 
781   /// State flags
782   ///
783   ///{
784   bool IsFixed = false;
785   bool IsValidState = true;
786   ///}
787 
788 public:
789   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
790       : AAReturnedValues(IRP, A) {}
791 
792   /// See AbstractAttribute::initialize(...).
793   void initialize(Attributor &A) override {
794     // Reset the state.
795     IsFixed = false;
796     IsValidState = true;
797     ReturnedValues.clear();
798 
799     Function *F = getAssociatedFunction();
800     if (!F) {
801       indicatePessimisticFixpoint();
802       return;
803     }
804     assert(!F->getReturnType()->isVoidTy() &&
805            "Did not expect a void return type!");
806 
807     // The map from instruction opcodes to those instructions in the function.
808     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
809 
810     // Look through all arguments, if one is marked as returned we are done.
811     for (Argument &Arg : F->args()) {
812       if (Arg.hasReturnedAttr()) {
813         auto &ReturnInstSet = ReturnedValues[&Arg];
814         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
815           for (Instruction *RI : *Insts)
816             ReturnInstSet.insert(cast<ReturnInst>(RI));
817 
818         indicateOptimisticFixpoint();
819         return;
820       }
821     }
822 
823     if (!A.isFunctionIPOAmendable(*F))
824       indicatePessimisticFixpoint();
825   }
826 
827   /// See AbstractAttribute::manifest(...).
828   ChangeStatus manifest(Attributor &A) override;
829 
830   /// See AbstractAttribute::getState(...).
831   AbstractState &getState() override { return *this; }
832 
833   /// See AbstractAttribute::getState(...).
834   const AbstractState &getState() const override { return *this; }
835 
836   /// See AbstractAttribute::updateImpl(Attributor &A).
837   ChangeStatus updateImpl(Attributor &A) override;
838 
839   llvm::iterator_range<iterator> returned_values() override {
840     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
841   }
842 
843   llvm::iterator_range<const_iterator> returned_values() const override {
844     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
845   }
846 
847   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
848     return UnresolvedCalls;
849   }
850 
851   /// Return the number of potential return values, -1 if unknown.
852   size_t getNumReturnValues() const override {
853     return isValidState() ? ReturnedValues.size() : -1;
854   }
855 
856   /// Return an assumed unique return value if a single candidate is found. If
857   /// there cannot be one, return a nullptr. If it is not clear yet, return the
858   /// Optional::NoneType.
859   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
860 
861   /// See AbstractState::checkForAllReturnedValues(...).
862   bool checkForAllReturnedValuesAndReturnInsts(
863       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
864       const override;
865 
866   /// Pretty print the attribute similar to the IR representation.
867   const std::string getAsStr() const override;
868 
869   /// See AbstractState::isAtFixpoint().
870   bool isAtFixpoint() const override { return IsFixed; }
871 
872   /// See AbstractState::isValidState().
873   bool isValidState() const override { return IsValidState; }
874 
875   /// See AbstractState::indicateOptimisticFixpoint(...).
876   ChangeStatus indicateOptimisticFixpoint() override {
877     IsFixed = true;
878     return ChangeStatus::UNCHANGED;
879   }
880 
881   ChangeStatus indicatePessimisticFixpoint() override {
882     IsFixed = true;
883     IsValidState = false;
884     return ChangeStatus::CHANGED;
885   }
886 };
887 
888 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
889   ChangeStatus Changed = ChangeStatus::UNCHANGED;
890 
891   // Bookkeeping.
892   assert(isValidState());
893   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
894                   "Number of function with known return values");
895 
896   // Check if we have an assumed unique return value that we could manifest.
897   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
898 
899   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
900     return Changed;
901 
902   // Bookkeeping.
903   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
904                   "Number of function with unique return");
905 
906   // Callback to replace the uses of CB with the constant C.
907   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
908     if (CB.use_empty())
909       return ChangeStatus::UNCHANGED;
910     if (A.changeValueAfterManifest(CB, C))
911       return ChangeStatus::CHANGED;
912     return ChangeStatus::UNCHANGED;
913   };
914 
915   // If the assumed unique return value is an argument, annotate it.
916   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
917     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
918             getAssociatedFunction()->getReturnType())) {
919       getIRPosition() = IRPosition::argument(*UniqueRVArg);
920       Changed = IRAttribute::manifest(A);
921     }
922   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
923     // We can replace the returned value with the unique returned constant.
924     Value &AnchorValue = getAnchorValue();
925     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
926       for (const Use &U : F->uses())
927         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
928           if (CB->isCallee(&U)) {
929             Constant *RVCCast =
930                 CB->getType() == RVC->getType()
931                     ? RVC
932                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
933             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
934           }
935     } else {
936       assert(isa<CallBase>(AnchorValue) &&
937              "Expcected a function or call base anchor!");
938       Constant *RVCCast =
939           AnchorValue.getType() == RVC->getType()
940               ? RVC
941               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
942       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
943     }
944     if (Changed == ChangeStatus::CHANGED)
945       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
946                       "Number of function returns replaced by constant return");
947   }
948 
949   return Changed;
950 }
951 
952 const std::string AAReturnedValuesImpl::getAsStr() const {
953   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
954          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
955          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
956 }
957 
958 Optional<Value *>
959 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
960   // If checkForAllReturnedValues provides a unique value, ignoring potential
961   // undef values that can also be present, it is assumed to be the actual
962   // return value and forwarded to the caller of this method. If there are
963   // multiple, a nullptr is returned indicating there cannot be a unique
964   // returned value.
965   Optional<Value *> UniqueRV;
966 
967   auto Pred = [&](Value &RV) -> bool {
968     // If we found a second returned value and neither the current nor the saved
969     // one is an undef, there is no unique returned value. Undefs are special
970     // since we can pretend they have any value.
971     if (UniqueRV.hasValue() && UniqueRV != &RV &&
972         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
973       UniqueRV = nullptr;
974       return false;
975     }
976 
977     // Do not overwrite a value with an undef.
978     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
979       UniqueRV = &RV;
980 
981     return true;
982   };
983 
984   if (!A.checkForAllReturnedValues(Pred, *this))
985     UniqueRV = nullptr;
986 
987   return UniqueRV;
988 }
989 
990 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
991     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
992     const {
993   if (!isValidState())
994     return false;
995 
996   // Check all returned values but ignore call sites as long as we have not
997   // encountered an overdefined one during an update.
998   for (auto &It : ReturnedValues) {
999     Value *RV = It.first;
1000 
1001     CallBase *CB = dyn_cast<CallBase>(RV);
1002     if (CB && !UnresolvedCalls.count(CB))
1003       continue;
1004 
1005     if (!Pred(*RV, It.second))
1006       return false;
1007   }
1008 
1009   return true;
1010 }
1011 
1012 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1013   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1014   bool Changed = false;
1015 
1016   // State used in the value traversals starting in returned values.
1017   struct RVState {
1018     // The map in which we collect return values -> return instrs.
1019     decltype(ReturnedValues) &RetValsMap;
1020     // The flag to indicate a change.
1021     bool &Changed;
1022     // The return instrs we come from.
1023     SmallSetVector<ReturnInst *, 4> RetInsts;
1024   };
1025 
1026   // Callback for a leaf value returned by the associated function.
1027   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1028                          bool) -> bool {
1029     auto Size = RVS.RetValsMap[&Val].size();
1030     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1031     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1032     RVS.Changed |= Inserted;
1033     LLVM_DEBUG({
1034       if (Inserted)
1035         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1036                << " => " << RVS.RetInsts.size() << "\n";
1037     });
1038     return true;
1039   };
1040 
1041   // Helper method to invoke the generic value traversal.
1042   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1043                                 const Instruction *CtxI) {
1044     IRPosition RetValPos = IRPosition::value(RV);
1045     return genericValueTraversal<AAReturnedValues, RVState>(
1046         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1047         /* UseValueSimplify */ false);
1048   };
1049 
1050   // Callback for all "return intructions" live in the associated function.
1051   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1052     ReturnInst &Ret = cast<ReturnInst>(I);
1053     RVState RVS({ReturnedValues, Changed, {}});
1054     RVS.RetInsts.insert(&Ret);
1055     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1056   };
1057 
1058   // Start by discovering returned values from all live returned instructions in
1059   // the associated function.
1060   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1061     return indicatePessimisticFixpoint();
1062 
1063   // Once returned values "directly" present in the code are handled we try to
1064   // resolve returned calls. To avoid modifications to the ReturnedValues map
1065   // while we iterate over it we kept record of potential new entries in a copy
1066   // map, NewRVsMap.
1067   decltype(ReturnedValues) NewRVsMap;
1068 
1069   auto HandleReturnValue = [&](Value *RV,
1070                                SmallSetVector<ReturnInst *, 4> &RIs) {
1071     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1072                       << RIs.size() << " RIs\n");
1073     CallBase *CB = dyn_cast<CallBase>(RV);
1074     if (!CB || UnresolvedCalls.count(CB))
1075       return;
1076 
1077     if (!CB->getCalledFunction()) {
1078       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1079                         << "\n");
1080       UnresolvedCalls.insert(CB);
1081       return;
1082     }
1083 
1084     // TODO: use the function scope once we have call site AAReturnedValues.
1085     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1086         *this, IRPosition::function(*CB->getCalledFunction()));
1087     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1088                       << RetValAA << "\n");
1089 
1090     // Skip dead ends, thus if we do not know anything about the returned
1091     // call we mark it as unresolved and it will stay that way.
1092     if (!RetValAA.getState().isValidState()) {
1093       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1094                         << "\n");
1095       UnresolvedCalls.insert(CB);
1096       return;
1097     }
1098 
1099     // Do not try to learn partial information. If the callee has unresolved
1100     // return values we will treat the call as unresolved/opaque.
1101     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1102     if (!RetValAAUnresolvedCalls.empty()) {
1103       UnresolvedCalls.insert(CB);
1104       return;
1105     }
1106 
1107     // Now check if we can track transitively returned values. If possible, thus
1108     // if all return value can be represented in the current scope, do so.
1109     bool Unresolved = false;
1110     for (auto &RetValAAIt : RetValAA.returned_values()) {
1111       Value *RetVal = RetValAAIt.first;
1112       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1113           isa<Constant>(RetVal))
1114         continue;
1115       // Anything that did not fit in the above categories cannot be resolved,
1116       // mark the call as unresolved.
1117       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1118                            "cannot be translated: "
1119                         << *RetVal << "\n");
1120       UnresolvedCalls.insert(CB);
1121       Unresolved = true;
1122       break;
1123     }
1124 
1125     if (Unresolved)
1126       return;
1127 
1128     // Now track transitively returned values.
1129     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1130     if (NumRetAA == RetValAA.getNumReturnValues()) {
1131       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1132                            "changed since it was seen last\n");
1133       return;
1134     }
1135     NumRetAA = RetValAA.getNumReturnValues();
1136 
1137     for (auto &RetValAAIt : RetValAA.returned_values()) {
1138       Value *RetVal = RetValAAIt.first;
1139       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1140         // Arguments are mapped to call site operands and we begin the traversal
1141         // again.
1142         bool Unused = false;
1143         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1144         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1145         continue;
1146       } else if (isa<CallBase>(RetVal)) {
1147         // Call sites are resolved by the callee attribute over time, no need to
1148         // do anything for us.
1149         continue;
1150       } else if (isa<Constant>(RetVal)) {
1151         // Constants are valid everywhere, we can simply take them.
1152         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1153         continue;
1154       }
1155     }
1156   };
1157 
1158   for (auto &It : ReturnedValues)
1159     HandleReturnValue(It.first, It.second);
1160 
1161   // Because processing the new information can again lead to new return values
1162   // we have to be careful and iterate until this iteration is complete. The
1163   // idea is that we are in a stable state at the end of an update. All return
1164   // values have been handled and properly categorized. We might not update
1165   // again if we have not requested a non-fix attribute so we cannot "wait" for
1166   // the next update to analyze a new return value.
1167   while (!NewRVsMap.empty()) {
1168     auto It = std::move(NewRVsMap.back());
1169     NewRVsMap.pop_back();
1170 
1171     assert(!It.second.empty() && "Entry does not add anything.");
1172     auto &ReturnInsts = ReturnedValues[It.first];
1173     for (ReturnInst *RI : It.second)
1174       if (ReturnInsts.insert(RI)) {
1175         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1176                           << *It.first << " => " << *RI << "\n");
1177         HandleReturnValue(It.first, ReturnInsts);
1178         Changed = true;
1179       }
1180   }
1181 
1182   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1183   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1184 }
1185 
1186 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1187   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1188       : AAReturnedValuesImpl(IRP, A) {}
1189 
1190   /// See AbstractAttribute::trackStatistics()
1191   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1192 };
1193 
1194 /// Returned values information for a call sites.
1195 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1196   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1197       : AAReturnedValuesImpl(IRP, A) {}
1198 
1199   /// See AbstractAttribute::initialize(...).
1200   void initialize(Attributor &A) override {
1201     // TODO: Once we have call site specific value information we can provide
1202     //       call site specific liveness information and then it makes
1203     //       sense to specialize attributes for call sites instead of
1204     //       redirecting requests to the callee.
1205     llvm_unreachable("Abstract attributes for returned values are not "
1206                      "supported for call sites yet!");
1207   }
1208 
1209   /// See AbstractAttribute::updateImpl(...).
1210   ChangeStatus updateImpl(Attributor &A) override {
1211     return indicatePessimisticFixpoint();
1212   }
1213 
1214   /// See AbstractAttribute::trackStatistics()
1215   void trackStatistics() const override {}
1216 };
1217 
1218 /// ------------------------ NoSync Function Attribute -------------------------
1219 
1220 struct AANoSyncImpl : AANoSync {
1221   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1222 
1223   const std::string getAsStr() const override {
1224     return getAssumed() ? "nosync" : "may-sync";
1225   }
1226 
1227   /// See AbstractAttribute::updateImpl(...).
1228   ChangeStatus updateImpl(Attributor &A) override;
1229 
1230   /// Helper function used to determine whether an instruction is non-relaxed
1231   /// atomic. In other words, if an atomic instruction does not have unordered
1232   /// or monotonic ordering
1233   static bool isNonRelaxedAtomic(Instruction *I);
1234 
1235   /// Helper function used to determine whether an instruction is volatile.
1236   static bool isVolatile(Instruction *I);
1237 
1238   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1239   /// memset).
1240   static bool isNoSyncIntrinsic(Instruction *I);
1241 };
1242 
1243 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1244   if (!I->isAtomic())
1245     return false;
1246 
1247   AtomicOrdering Ordering;
1248   switch (I->getOpcode()) {
1249   case Instruction::AtomicRMW:
1250     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1251     break;
1252   case Instruction::Store:
1253     Ordering = cast<StoreInst>(I)->getOrdering();
1254     break;
1255   case Instruction::Load:
1256     Ordering = cast<LoadInst>(I)->getOrdering();
1257     break;
1258   case Instruction::Fence: {
1259     auto *FI = cast<FenceInst>(I);
1260     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1261       return false;
1262     Ordering = FI->getOrdering();
1263     break;
1264   }
1265   case Instruction::AtomicCmpXchg: {
1266     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1267     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1268     // Only if both are relaxed, than it can be treated as relaxed.
1269     // Otherwise it is non-relaxed.
1270     if (Success != AtomicOrdering::Unordered &&
1271         Success != AtomicOrdering::Monotonic)
1272       return true;
1273     if (Failure != AtomicOrdering::Unordered &&
1274         Failure != AtomicOrdering::Monotonic)
1275       return true;
1276     return false;
1277   }
1278   default:
1279     llvm_unreachable(
1280         "New atomic operations need to be known in the attributor.");
1281   }
1282 
1283   // Relaxed.
1284   if (Ordering == AtomicOrdering::Unordered ||
1285       Ordering == AtomicOrdering::Monotonic)
1286     return false;
1287   return true;
1288 }
1289 
1290 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1291 /// FIXME: We should ipmrove the handling of intrinsics.
1292 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1293   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1294     switch (II->getIntrinsicID()) {
1295     /// Element wise atomic memory intrinsics are can only be unordered,
1296     /// therefore nosync.
1297     case Intrinsic::memset_element_unordered_atomic:
1298     case Intrinsic::memmove_element_unordered_atomic:
1299     case Intrinsic::memcpy_element_unordered_atomic:
1300       return true;
1301     case Intrinsic::memset:
1302     case Intrinsic::memmove:
1303     case Intrinsic::memcpy:
1304       if (!cast<MemIntrinsic>(II)->isVolatile())
1305         return true;
1306       return false;
1307     default:
1308       return false;
1309     }
1310   }
1311   return false;
1312 }
1313 
1314 bool AANoSyncImpl::isVolatile(Instruction *I) {
1315   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1316 
1317   switch (I->getOpcode()) {
1318   case Instruction::AtomicRMW:
1319     return cast<AtomicRMWInst>(I)->isVolatile();
1320   case Instruction::Store:
1321     return cast<StoreInst>(I)->isVolatile();
1322   case Instruction::Load:
1323     return cast<LoadInst>(I)->isVolatile();
1324   case Instruction::AtomicCmpXchg:
1325     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1326   default:
1327     return false;
1328   }
1329 }
1330 
1331 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1332 
1333   auto CheckRWInstForNoSync = [&](Instruction &I) {
1334     /// We are looking for volatile instructions or Non-Relaxed atomics.
1335     /// FIXME: We should improve the handling of intrinsics.
1336 
1337     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1338       return true;
1339 
1340     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1341       if (CB->hasFnAttr(Attribute::NoSync))
1342         return true;
1343 
1344       const auto &NoSyncAA =
1345           A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
1346       if (NoSyncAA.isAssumedNoSync())
1347         return true;
1348       return false;
1349     }
1350 
1351     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1352       return true;
1353 
1354     return false;
1355   };
1356 
1357   auto CheckForNoSync = [&](Instruction &I) {
1358     // At this point we handled all read/write effects and they are all
1359     // nosync, so they can be skipped.
1360     if (I.mayReadOrWriteMemory())
1361       return true;
1362 
1363     // non-convergent and readnone imply nosync.
1364     return !cast<CallBase>(I).isConvergent();
1365   };
1366 
1367   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1368       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1369     return indicatePessimisticFixpoint();
1370 
1371   return ChangeStatus::UNCHANGED;
1372 }
1373 
1374 struct AANoSyncFunction final : public AANoSyncImpl {
1375   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1376       : AANoSyncImpl(IRP, A) {}
1377 
1378   /// See AbstractAttribute::trackStatistics()
1379   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1380 };
1381 
1382 /// NoSync attribute deduction for a call sites.
1383 struct AANoSyncCallSite final : AANoSyncImpl {
1384   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1385       : AANoSyncImpl(IRP, A) {}
1386 
1387   /// See AbstractAttribute::initialize(...).
1388   void initialize(Attributor &A) override {
1389     AANoSyncImpl::initialize(A);
1390     Function *F = getAssociatedFunction();
1391     if (!F)
1392       indicatePessimisticFixpoint();
1393   }
1394 
1395   /// See AbstractAttribute::updateImpl(...).
1396   ChangeStatus updateImpl(Attributor &A) override {
1397     // TODO: Once we have call site specific value information we can provide
1398     //       call site specific liveness information and then it makes
1399     //       sense to specialize attributes for call sites arguments instead of
1400     //       redirecting requests to the callee argument.
1401     Function *F = getAssociatedFunction();
1402     const IRPosition &FnPos = IRPosition::function(*F);
1403     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
1404     return clampStateAndIndicateChange(
1405         getState(), static_cast<const AANoSync::StateType &>(FnAA.getState()));
1406   }
1407 
1408   /// See AbstractAttribute::trackStatistics()
1409   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1410 };
1411 
1412 /// ------------------------ No-Free Attributes ----------------------------
1413 
1414 struct AANoFreeImpl : public AANoFree {
1415   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1416 
1417   /// See AbstractAttribute::updateImpl(...).
1418   ChangeStatus updateImpl(Attributor &A) override {
1419     auto CheckForNoFree = [&](Instruction &I) {
1420       const auto &CB = cast<CallBase>(I);
1421       if (CB.hasFnAttr(Attribute::NoFree))
1422         return true;
1423 
1424       const auto &NoFreeAA =
1425           A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
1426       return NoFreeAA.isAssumedNoFree();
1427     };
1428 
1429     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1430       return indicatePessimisticFixpoint();
1431     return ChangeStatus::UNCHANGED;
1432   }
1433 
1434   /// See AbstractAttribute::getAsStr().
1435   const std::string getAsStr() const override {
1436     return getAssumed() ? "nofree" : "may-free";
1437   }
1438 };
1439 
1440 struct AANoFreeFunction final : public AANoFreeImpl {
1441   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1442       : AANoFreeImpl(IRP, A) {}
1443 
1444   /// See AbstractAttribute::trackStatistics()
1445   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1446 };
1447 
1448 /// NoFree attribute deduction for a call sites.
1449 struct AANoFreeCallSite final : AANoFreeImpl {
1450   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1451       : AANoFreeImpl(IRP, A) {}
1452 
1453   /// See AbstractAttribute::initialize(...).
1454   void initialize(Attributor &A) override {
1455     AANoFreeImpl::initialize(A);
1456     Function *F = getAssociatedFunction();
1457     if (!F)
1458       indicatePessimisticFixpoint();
1459   }
1460 
1461   /// See AbstractAttribute::updateImpl(...).
1462   ChangeStatus updateImpl(Attributor &A) override {
1463     // TODO: Once we have call site specific value information we can provide
1464     //       call site specific liveness information and then it makes
1465     //       sense to specialize attributes for call sites arguments instead of
1466     //       redirecting requests to the callee argument.
1467     Function *F = getAssociatedFunction();
1468     const IRPosition &FnPos = IRPosition::function(*F);
1469     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
1470     return clampStateAndIndicateChange(
1471         getState(), static_cast<const AANoFree::StateType &>(FnAA.getState()));
1472   }
1473 
1474   /// See AbstractAttribute::trackStatistics()
1475   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1476 };
1477 
1478 /// NoFree attribute for floating values.
1479 struct AANoFreeFloating : AANoFreeImpl {
1480   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1481       : AANoFreeImpl(IRP, A) {}
1482 
1483   /// See AbstractAttribute::trackStatistics()
1484   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1485 
1486   /// See Abstract Attribute::updateImpl(...).
1487   ChangeStatus updateImpl(Attributor &A) override {
1488     const IRPosition &IRP = getIRPosition();
1489 
1490     const auto &NoFreeAA =
1491         A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
1492     if (NoFreeAA.isAssumedNoFree())
1493       return ChangeStatus::UNCHANGED;
1494 
1495     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1496     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1497       Instruction *UserI = cast<Instruction>(U.getUser());
1498       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1499         if (CB->isBundleOperand(&U))
1500           return false;
1501         if (!CB->isArgOperand(&U))
1502           return true;
1503         unsigned ArgNo = CB->getArgOperandNo(&U);
1504 
1505         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1506             *this, IRPosition::callsite_argument(*CB, ArgNo));
1507         return NoFreeArg.isAssumedNoFree();
1508       }
1509 
1510       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1511           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1512         Follow = true;
1513         return true;
1514       }
1515       if (isa<ReturnInst>(UserI))
1516         return true;
1517 
1518       // Unknown user.
1519       return false;
1520     };
1521     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1522       return indicatePessimisticFixpoint();
1523 
1524     return ChangeStatus::UNCHANGED;
1525   }
1526 };
1527 
1528 /// NoFree attribute for a call site argument.
1529 struct AANoFreeArgument final : AANoFreeFloating {
1530   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1531       : AANoFreeFloating(IRP, A) {}
1532 
1533   /// See AbstractAttribute::trackStatistics()
1534   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1535 };
1536 
1537 /// NoFree attribute for call site arguments.
1538 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1539   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1540       : AANoFreeFloating(IRP, A) {}
1541 
1542   /// See AbstractAttribute::updateImpl(...).
1543   ChangeStatus updateImpl(Attributor &A) override {
1544     // TODO: Once we have call site specific value information we can provide
1545     //       call site specific liveness information and then it makes
1546     //       sense to specialize attributes for call sites arguments instead of
1547     //       redirecting requests to the callee argument.
1548     Argument *Arg = getAssociatedArgument();
1549     if (!Arg)
1550       return indicatePessimisticFixpoint();
1551     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1552     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
1553     return clampStateAndIndicateChange(
1554         getState(), static_cast<const AANoFree::StateType &>(ArgAA.getState()));
1555   }
1556 
1557   /// See AbstractAttribute::trackStatistics()
1558   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1559 };
1560 
1561 /// NoFree attribute for function return value.
1562 struct AANoFreeReturned final : AANoFreeFloating {
1563   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1564       : AANoFreeFloating(IRP, A) {
1565     llvm_unreachable("NoFree is not applicable to function returns!");
1566   }
1567 
1568   /// See AbstractAttribute::initialize(...).
1569   void initialize(Attributor &A) override {
1570     llvm_unreachable("NoFree is not applicable to function returns!");
1571   }
1572 
1573   /// See AbstractAttribute::updateImpl(...).
1574   ChangeStatus updateImpl(Attributor &A) override {
1575     llvm_unreachable("NoFree is not applicable to function returns!");
1576   }
1577 
1578   /// See AbstractAttribute::trackStatistics()
1579   void trackStatistics() const override {}
1580 };
1581 
1582 /// NoFree attribute deduction for a call site return value.
1583 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1584   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1585       : AANoFreeFloating(IRP, A) {}
1586 
1587   ChangeStatus manifest(Attributor &A) override {
1588     return ChangeStatus::UNCHANGED;
1589   }
1590   /// See AbstractAttribute::trackStatistics()
1591   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1592 };
1593 
1594 /// ------------------------ NonNull Argument Attribute ------------------------
1595 static int64_t getKnownNonNullAndDerefBytesForUse(
1596     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1597     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1598   TrackUse = false;
1599 
1600   const Value *UseV = U->get();
1601   if (!UseV->getType()->isPointerTy())
1602     return 0;
1603 
1604   Type *PtrTy = UseV->getType();
1605   const Function *F = I->getFunction();
1606   bool NullPointerIsDefined =
1607       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1608   const DataLayout &DL = A.getInfoCache().getDL();
1609   if (const auto *CB = dyn_cast<CallBase>(I)) {
1610     if (CB->isBundleOperand(U)) {
1611       if (RetainedKnowledge RK = getKnowledgeFromUse(
1612               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1613         IsNonNull |=
1614             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1615         return RK.ArgValue;
1616       }
1617       return 0;
1618     }
1619 
1620     if (CB->isCallee(U)) {
1621       IsNonNull |= !NullPointerIsDefined;
1622       return 0;
1623     }
1624 
1625     unsigned ArgNo = CB->getArgOperandNo(U);
1626     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1627     // As long as we only use known information there is no need to track
1628     // dependences here.
1629     auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
1630                                                   /* TrackDependence */ false);
1631     IsNonNull |= DerefAA.isKnownNonNull();
1632     return DerefAA.getKnownDereferenceableBytes();
1633   }
1634 
1635   // We need to follow common pointer manipulation uses to the accesses they
1636   // feed into. We can try to be smart to avoid looking through things we do not
1637   // like for now, e.g., non-inbounds GEPs.
1638   if (isa<CastInst>(I)) {
1639     TrackUse = true;
1640     return 0;
1641   }
1642 
1643   if (isa<GetElementPtrInst>(I)) {
1644     TrackUse = true;
1645     return 0;
1646   }
1647 
1648   int64_t Offset;
1649   const Value *Base =
1650       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1651   if (Base) {
1652     if (Base == &AssociatedValue &&
1653         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1654       int64_t DerefBytes =
1655           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1656 
1657       IsNonNull |= !NullPointerIsDefined;
1658       return std::max(int64_t(0), DerefBytes);
1659     }
1660   }
1661 
1662   /// Corner case when an offset is 0.
1663   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1664                                               /*AllowNonInbounds*/ true);
1665   if (Base) {
1666     if (Offset == 0 && Base == &AssociatedValue &&
1667         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1668       int64_t DerefBytes =
1669           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1670       IsNonNull |= !NullPointerIsDefined;
1671       return std::max(int64_t(0), DerefBytes);
1672     }
1673   }
1674 
1675   return 0;
1676 }
1677 
1678 struct AANonNullImpl : AANonNull {
1679   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1680       : AANonNull(IRP, A),
1681         NullIsDefined(NullPointerIsDefined(
1682             getAnchorScope(),
1683             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1684 
1685   /// See AbstractAttribute::initialize(...).
1686   void initialize(Attributor &A) override {
1687     Value &V = getAssociatedValue();
1688     if (!NullIsDefined &&
1689         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1690                 /* IgnoreSubsumingPositions */ false, &A))
1691       indicateOptimisticFixpoint();
1692     else if (isa<ConstantPointerNull>(V))
1693       indicatePessimisticFixpoint();
1694     else
1695       AANonNull::initialize(A);
1696 
1697     bool CanBeNull = true;
1698     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull))
1699       if (!CanBeNull)
1700         indicateOptimisticFixpoint();
1701 
1702     if (!getState().isAtFixpoint())
1703       if (Instruction *CtxI = getCtxI())
1704         followUsesInMBEC(*this, A, getState(), *CtxI);
1705   }
1706 
1707   /// See followUsesInMBEC
1708   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1709                        AANonNull::StateType &State) {
1710     bool IsNonNull = false;
1711     bool TrackUse = false;
1712     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1713                                        IsNonNull, TrackUse);
1714     State.setKnown(IsNonNull);
1715     return TrackUse;
1716   }
1717 
1718   /// See AbstractAttribute::getAsStr().
1719   const std::string getAsStr() const override {
1720     return getAssumed() ? "nonnull" : "may-null";
1721   }
1722 
1723   /// Flag to determine if the underlying value can be null and still allow
1724   /// valid accesses.
1725   const bool NullIsDefined;
1726 };
1727 
1728 /// NonNull attribute for a floating value.
1729 struct AANonNullFloating : public AANonNullImpl {
1730   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1731       : AANonNullImpl(IRP, A) {}
1732 
1733   /// See AbstractAttribute::updateImpl(...).
1734   ChangeStatus updateImpl(Attributor &A) override {
1735     if (!NullIsDefined) {
1736       const auto &DerefAA =
1737           A.getAAFor<AADereferenceable>(*this, getIRPosition());
1738       if (DerefAA.getAssumedDereferenceableBytes())
1739         return ChangeStatus::UNCHANGED;
1740     }
1741 
1742     const DataLayout &DL = A.getDataLayout();
1743 
1744     DominatorTree *DT = nullptr;
1745     AssumptionCache *AC = nullptr;
1746     InformationCache &InfoCache = A.getInfoCache();
1747     if (const Function *Fn = getAnchorScope()) {
1748       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1749       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1750     }
1751 
1752     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1753                             AANonNull::StateType &T, bool Stripped) -> bool {
1754       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
1755       if (!Stripped && this == &AA) {
1756         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1757           T.indicatePessimisticFixpoint();
1758       } else {
1759         // Use abstract attribute information.
1760         const AANonNull::StateType &NS =
1761             static_cast<const AANonNull::StateType &>(AA.getState());
1762         T ^= NS;
1763       }
1764       return T.isValidState();
1765     };
1766 
1767     StateType T;
1768     if (!genericValueTraversal<AANonNull, StateType>(
1769             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1770       return indicatePessimisticFixpoint();
1771 
1772     return clampStateAndIndicateChange(getState(), T);
1773   }
1774 
1775   /// See AbstractAttribute::trackStatistics()
1776   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1777 };
1778 
1779 /// NonNull attribute for function return value.
1780 struct AANonNullReturned final
1781     : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
1782   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1783       : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP, A) {}
1784 
1785   /// See AbstractAttribute::trackStatistics()
1786   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1787 };
1788 
1789 /// NonNull attribute for function argument.
1790 struct AANonNullArgument final
1791     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1792   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1793       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1794 
1795   /// See AbstractAttribute::trackStatistics()
1796   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1797 };
1798 
1799 struct AANonNullCallSiteArgument final : AANonNullFloating {
1800   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1801       : AANonNullFloating(IRP, A) {}
1802 
1803   /// See AbstractAttribute::trackStatistics()
1804   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1805 };
1806 
1807 /// NonNull attribute for a call site return position.
1808 struct AANonNullCallSiteReturned final
1809     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1810   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1811       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1812 
1813   /// See AbstractAttribute::trackStatistics()
1814   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1815 };
1816 
1817 /// ------------------------ No-Recurse Attributes ----------------------------
1818 
1819 struct AANoRecurseImpl : public AANoRecurse {
1820   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1821 
1822   /// See AbstractAttribute::getAsStr()
1823   const std::string getAsStr() const override {
1824     return getAssumed() ? "norecurse" : "may-recurse";
1825   }
1826 };
1827 
1828 struct AANoRecurseFunction final : AANoRecurseImpl {
1829   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1830       : AANoRecurseImpl(IRP, A) {}
1831 
1832   /// See AbstractAttribute::initialize(...).
1833   void initialize(Attributor &A) override {
1834     AANoRecurseImpl::initialize(A);
1835     if (const Function *F = getAnchorScope())
1836       if (A.getInfoCache().getSccSize(*F) != 1)
1837         indicatePessimisticFixpoint();
1838   }
1839 
1840   /// See AbstractAttribute::updateImpl(...).
1841   ChangeStatus updateImpl(Attributor &A) override {
1842 
1843     // If all live call sites are known to be no-recurse, we are as well.
1844     auto CallSitePred = [&](AbstractCallSite ACS) {
1845       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1846           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1847           /* TrackDependence */ false, DepClassTy::OPTIONAL);
1848       return NoRecurseAA.isKnownNoRecurse();
1849     };
1850     bool AllCallSitesKnown;
1851     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1852       // If we know all call sites and all are known no-recurse, we are done.
1853       // If all known call sites, which might not be all that exist, are known
1854       // to be no-recurse, we are not done but we can continue to assume
1855       // no-recurse. If one of the call sites we have not visited will become
1856       // live, another update is triggered.
1857       if (AllCallSitesKnown)
1858         indicateOptimisticFixpoint();
1859       return ChangeStatus::UNCHANGED;
1860     }
1861 
1862     // If the above check does not hold anymore we look at the calls.
1863     auto CheckForNoRecurse = [&](Instruction &I) {
1864       const auto &CB = cast<CallBase>(I);
1865       if (CB.hasFnAttr(Attribute::NoRecurse))
1866         return true;
1867 
1868       const auto &NoRecurseAA =
1869           A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
1870       if (!NoRecurseAA.isAssumedNoRecurse())
1871         return false;
1872 
1873       // Recursion to the same function
1874       if (CB.getCalledFunction() == getAnchorScope())
1875         return false;
1876 
1877       return true;
1878     };
1879 
1880     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1881       return indicatePessimisticFixpoint();
1882     return ChangeStatus::UNCHANGED;
1883   }
1884 
1885   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1886 };
1887 
1888 /// NoRecurse attribute deduction for a call sites.
1889 struct AANoRecurseCallSite final : AANoRecurseImpl {
1890   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1891       : AANoRecurseImpl(IRP, A) {}
1892 
1893   /// See AbstractAttribute::initialize(...).
1894   void initialize(Attributor &A) override {
1895     AANoRecurseImpl::initialize(A);
1896     Function *F = getAssociatedFunction();
1897     if (!F)
1898       indicatePessimisticFixpoint();
1899   }
1900 
1901   /// See AbstractAttribute::updateImpl(...).
1902   ChangeStatus updateImpl(Attributor &A) override {
1903     // TODO: Once we have call site specific value information we can provide
1904     //       call site specific liveness information and then it makes
1905     //       sense to specialize attributes for call sites arguments instead of
1906     //       redirecting requests to the callee argument.
1907     Function *F = getAssociatedFunction();
1908     const IRPosition &FnPos = IRPosition::function(*F);
1909     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
1910     return clampStateAndIndicateChange(
1911         getState(),
1912         static_cast<const AANoRecurse::StateType &>(FnAA.getState()));
1913   }
1914 
1915   /// See AbstractAttribute::trackStatistics()
1916   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1917 };
1918 
1919 /// -------------------- Undefined-Behavior Attributes ------------------------
1920 
1921 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1922   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1923       : AAUndefinedBehavior(IRP, A) {}
1924 
1925   /// See AbstractAttribute::updateImpl(...).
1926   // through a pointer (i.e. also branches etc.)
1927   ChangeStatus updateImpl(Attributor &A) override {
1928     const size_t UBPrevSize = KnownUBInsts.size();
1929     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1930 
1931     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1932       // Skip instructions that are already saved.
1933       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1934         return true;
1935 
1936       // If we reach here, we know we have an instruction
1937       // that accesses memory through a pointer operand,
1938       // for which getPointerOperand() should give it to us.
1939       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1940       assert(PtrOp &&
1941              "Expected pointer operand of memory accessing instruction");
1942 
1943       // Either we stopped and the appropriate action was taken,
1944       // or we got back a simplified value to continue.
1945       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1946       if (!SimplifiedPtrOp.hasValue())
1947         return true;
1948       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1949 
1950       // A memory access through a pointer is considered UB
1951       // only if the pointer has constant null value.
1952       // TODO: Expand it to not only check constant values.
1953       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1954         AssumedNoUBInsts.insert(&I);
1955         return true;
1956       }
1957       const Type *PtrTy = PtrOpVal->getType();
1958 
1959       // Because we only consider instructions inside functions,
1960       // assume that a parent function exists.
1961       const Function *F = I.getFunction();
1962 
1963       // A memory access using constant null pointer is only considered UB
1964       // if null pointer is _not_ defined for the target platform.
1965       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1966         AssumedNoUBInsts.insert(&I);
1967       else
1968         KnownUBInsts.insert(&I);
1969       return true;
1970     };
1971 
1972     auto InspectBrInstForUB = [&](Instruction &I) {
1973       // A conditional branch instruction is considered UB if it has `undef`
1974       // condition.
1975 
1976       // Skip instructions that are already saved.
1977       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1978         return true;
1979 
1980       // We know we have a branch instruction.
1981       auto BrInst = cast<BranchInst>(&I);
1982 
1983       // Unconditional branches are never considered UB.
1984       if (BrInst->isUnconditional())
1985         return true;
1986 
1987       // Either we stopped and the appropriate action was taken,
1988       // or we got back a simplified value to continue.
1989       Optional<Value *> SimplifiedCond =
1990           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1991       if (!SimplifiedCond.hasValue())
1992         return true;
1993       AssumedNoUBInsts.insert(&I);
1994       return true;
1995     };
1996 
1997     auto InspectCallSiteForUB = [&](Instruction &I) {
1998       // Check whether a callsite always cause UB or not
1999 
2000       // Skip instructions that are already saved.
2001       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2002         return true;
2003 
2004       // Check nonnull and noundef argument attribute violation for each
2005       // callsite.
2006       CallBase &CB = cast<CallBase>(I);
2007       Function *Callee = CB.getCalledFunction();
2008       if (!Callee)
2009         return true;
2010       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2011         // If current argument is known to be simplified to null pointer and the
2012         // corresponding argument position is known to have nonnull attribute,
2013         // the argument is poison. Furthermore, if the argument is poison and
2014         // the position is known to have noundef attriubte, this callsite is
2015         // considered UB.
2016         // TODO: Check also nopoison attribute if it is introduced.
2017         if (idx >= Callee->arg_size())
2018           break;
2019         Value *ArgVal = CB.getArgOperand(idx);
2020         if (!ArgVal)
2021           continue;
2022         IRPosition CalleeArgumentIRP =
2023             IRPosition::argument(*Callee->getArg(idx));
2024         if (!CalleeArgumentIRP.hasAttr({Attribute::NoUndef}))
2025           continue;
2026         auto &NonNullAA = A.getAAFor<AANonNull>(*this, CalleeArgumentIRP);
2027         if (!NonNullAA.isKnownNonNull())
2028           continue;
2029         const auto &ValueSimplifyAA =
2030             A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*ArgVal));
2031         Optional<Value *> SimplifiedVal =
2032             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2033 
2034         if (!ValueSimplifyAA.isKnown())
2035           continue;
2036         // Here, we handle three cases.
2037         //   (1) Not having a value means it is dead. (we can replace the value
2038         //       with undef)
2039         //   (2) Simplified to null pointer. The argument is a poison value and
2040         //       violate noundef attribute.
2041         //   (3) Simplified to undef. The argument violate noundef attriubte.
2042         if (!SimplifiedVal.hasValue() ||
2043             isa<ConstantPointerNull>(*SimplifiedVal.getValue()) ||
2044             isa<UndefValue>(*SimplifiedVal.getValue())) {
2045           KnownUBInsts.insert(&I);
2046           return true;
2047         }
2048       }
2049       return true;
2050     };
2051 
2052     auto InspectReturnInstForUB =
2053         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2054           // Check if a return instruction always cause UB or not
2055           // Note: It is guaranteed that the returned position of the anchor
2056           //       scope has noundef attribute when this is called.
2057 
2058           // When the returned position has noundef attriubte, UB occur in the
2059           // following cases.
2060           //   (1) Returned value is known to be undef.
2061           //   (2) The value is known to be a null pointer and the returned
2062           //       position has nonnull attribute (because the returned value is
2063           //       poison).
2064           // Note: This callback is not called for a dead returned value because
2065           //       such values are ignored in
2066           //       checkForAllReturnedValuesAndReturnedInsts.
2067           bool FoundUB = false;
2068           if (isa<UndefValue>(V)) {
2069             FoundUB = true;
2070           } else {
2071             auto &NonNullAA = A.getAAFor<AANonNull>(
2072                 *this, IRPosition::returned(*getAnchorScope()));
2073             if (NonNullAA.isKnownNonNull() && isa<ConstantPointerNull>(V))
2074               FoundUB = true;
2075           }
2076 
2077           if (FoundUB)
2078             for (ReturnInst *RI : RetInsts)
2079               KnownUBInsts.insert(RI);
2080           return true;
2081         };
2082 
2083     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2084                               {Instruction::Load, Instruction::Store,
2085                                Instruction::AtomicCmpXchg,
2086                                Instruction::AtomicRMW},
2087                               /* CheckBBLivenessOnly */ true);
2088     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2089                               /* CheckBBLivenessOnly */ true);
2090     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2091 
2092     // If the returned position of the anchor scope has noundef attriubte, check
2093     // all returned instructions.
2094     // TODO: If AANoUndef is implemented, ask it here.
2095     if (IRPosition::returned(*getAnchorScope()).hasAttr({Attribute::NoUndef}))
2096       A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB, *this);
2097 
2098     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2099         UBPrevSize != KnownUBInsts.size())
2100       return ChangeStatus::CHANGED;
2101     return ChangeStatus::UNCHANGED;
2102   }
2103 
2104   bool isKnownToCauseUB(Instruction *I) const override {
2105     return KnownUBInsts.count(I);
2106   }
2107 
2108   bool isAssumedToCauseUB(Instruction *I) const override {
2109     // In simple words, if an instruction is not in the assumed to _not_
2110     // cause UB, then it is assumed UB (that includes those
2111     // in the KnownUBInsts set). The rest is boilerplate
2112     // is to ensure that it is one of the instructions we test
2113     // for UB.
2114 
2115     switch (I->getOpcode()) {
2116     case Instruction::Load:
2117     case Instruction::Store:
2118     case Instruction::AtomicCmpXchg:
2119     case Instruction::AtomicRMW:
2120       return !AssumedNoUBInsts.count(I);
2121     case Instruction::Br: {
2122       auto BrInst = cast<BranchInst>(I);
2123       if (BrInst->isUnconditional())
2124         return false;
2125       return !AssumedNoUBInsts.count(I);
2126     } break;
2127     default:
2128       return false;
2129     }
2130     return false;
2131   }
2132 
2133   ChangeStatus manifest(Attributor &A) override {
2134     if (KnownUBInsts.empty())
2135       return ChangeStatus::UNCHANGED;
2136     for (Instruction *I : KnownUBInsts)
2137       A.changeToUnreachableAfterManifest(I);
2138     return ChangeStatus::CHANGED;
2139   }
2140 
2141   /// See AbstractAttribute::getAsStr()
2142   const std::string getAsStr() const override {
2143     return getAssumed() ? "undefined-behavior" : "no-ub";
2144   }
2145 
2146   /// Note: The correctness of this analysis depends on the fact that the
2147   /// following 2 sets will stop changing after some point.
2148   /// "Change" here means that their size changes.
2149   /// The size of each set is monotonically increasing
2150   /// (we only add items to them) and it is upper bounded by the number of
2151   /// instructions in the processed function (we can never save more
2152   /// elements in either set than this number). Hence, at some point,
2153   /// they will stop increasing.
2154   /// Consequently, at some point, both sets will have stopped
2155   /// changing, effectively making the analysis reach a fixpoint.
2156 
2157   /// Note: These 2 sets are disjoint and an instruction can be considered
2158   /// one of 3 things:
2159   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2160   ///    the KnownUBInsts set.
2161   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2162   ///    has a reason to assume it).
2163   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2164   ///    could not find a reason to assume or prove that it can cause UB,
2165   ///    hence it assumes it doesn't. We have a set for these instructions
2166   ///    so that we don't reprocess them in every update.
2167   ///    Note however that instructions in this set may cause UB.
2168 
2169 protected:
2170   /// A set of all live instructions _known_ to cause UB.
2171   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2172 
2173 private:
2174   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2175   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2176 
2177   // Should be called on updates in which if we're processing an instruction
2178   // \p I that depends on a value \p V, one of the following has to happen:
2179   // - If the value is assumed, then stop.
2180   // - If the value is known but undef, then consider it UB.
2181   // - Otherwise, do specific processing with the simplified value.
2182   // We return None in the first 2 cases to signify that an appropriate
2183   // action was taken and the caller should stop.
2184   // Otherwise, we return the simplified value that the caller should
2185   // use for specific processing.
2186   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2187                                          Instruction *I) {
2188     const auto &ValueSimplifyAA =
2189         A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
2190     Optional<Value *> SimplifiedV =
2191         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2192     if (!ValueSimplifyAA.isKnown()) {
2193       // Don't depend on assumed values.
2194       return llvm::None;
2195     }
2196     if (!SimplifiedV.hasValue()) {
2197       // If it is known (which we tested above) but it doesn't have a value,
2198       // then we can assume `undef` and hence the instruction is UB.
2199       KnownUBInsts.insert(I);
2200       return llvm::None;
2201     }
2202     Value *Val = SimplifiedV.getValue();
2203     if (isa<UndefValue>(Val)) {
2204       KnownUBInsts.insert(I);
2205       return llvm::None;
2206     }
2207     return Val;
2208   }
2209 };
2210 
2211 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2212   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2213       : AAUndefinedBehaviorImpl(IRP, A) {}
2214 
2215   /// See AbstractAttribute::trackStatistics()
2216   void trackStatistics() const override {
2217     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2218                "Number of instructions known to have UB");
2219     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2220         KnownUBInsts.size();
2221   }
2222 };
2223 
2224 /// ------------------------ Will-Return Attributes ----------------------------
2225 
2226 // Helper function that checks whether a function has any cycle which we don't
2227 // know if it is bounded or not.
2228 // Loops with maximum trip count are considered bounded, any other cycle not.
2229 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2230   ScalarEvolution *SE =
2231       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2232   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2233   // If either SCEV or LoopInfo is not available for the function then we assume
2234   // any cycle to be unbounded cycle.
2235   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2236   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2237   if (!SE || !LI) {
2238     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2239       if (SCCI.hasCycle())
2240         return true;
2241     return false;
2242   }
2243 
2244   // If there's irreducible control, the function may contain non-loop cycles.
2245   if (mayContainIrreducibleControl(F, LI))
2246     return true;
2247 
2248   // Any loop that does not have a max trip count is considered unbounded cycle.
2249   for (auto *L : LI->getLoopsInPreorder()) {
2250     if (!SE->getSmallConstantMaxTripCount(L))
2251       return true;
2252   }
2253   return false;
2254 }
2255 
2256 struct AAWillReturnImpl : public AAWillReturn {
2257   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2258       : AAWillReturn(IRP, A) {}
2259 
2260   /// See AbstractAttribute::initialize(...).
2261   void initialize(Attributor &A) override {
2262     AAWillReturn::initialize(A);
2263 
2264     Function *F = getAnchorScope();
2265     if (!F || !A.isFunctionIPOAmendable(*F) || mayContainUnboundedCycle(*F, A))
2266       indicatePessimisticFixpoint();
2267   }
2268 
2269   /// See AbstractAttribute::updateImpl(...).
2270   ChangeStatus updateImpl(Attributor &A) override {
2271     auto CheckForWillReturn = [&](Instruction &I) {
2272       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2273       const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
2274       if (WillReturnAA.isKnownWillReturn())
2275         return true;
2276       if (!WillReturnAA.isAssumedWillReturn())
2277         return false;
2278       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
2279       return NoRecurseAA.isAssumedNoRecurse();
2280     };
2281 
2282     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2283       return indicatePessimisticFixpoint();
2284 
2285     return ChangeStatus::UNCHANGED;
2286   }
2287 
2288   /// See AbstractAttribute::getAsStr()
2289   const std::string getAsStr() const override {
2290     return getAssumed() ? "willreturn" : "may-noreturn";
2291   }
2292 };
2293 
2294 struct AAWillReturnFunction final : AAWillReturnImpl {
2295   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2296       : AAWillReturnImpl(IRP, A) {}
2297 
2298   /// See AbstractAttribute::trackStatistics()
2299   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2300 };
2301 
2302 /// WillReturn attribute deduction for a call sites.
2303 struct AAWillReturnCallSite final : AAWillReturnImpl {
2304   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2305       : AAWillReturnImpl(IRP, A) {}
2306 
2307   /// See AbstractAttribute::initialize(...).
2308   void initialize(Attributor &A) override {
2309     AAWillReturnImpl::initialize(A);
2310     Function *F = getAssociatedFunction();
2311     if (!F)
2312       indicatePessimisticFixpoint();
2313   }
2314 
2315   /// See AbstractAttribute::updateImpl(...).
2316   ChangeStatus updateImpl(Attributor &A) override {
2317     // TODO: Once we have call site specific value information we can provide
2318     //       call site specific liveness information and then it makes
2319     //       sense to specialize attributes for call sites arguments instead of
2320     //       redirecting requests to the callee argument.
2321     Function *F = getAssociatedFunction();
2322     const IRPosition &FnPos = IRPosition::function(*F);
2323     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
2324     return clampStateAndIndicateChange(
2325         getState(),
2326         static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
2327   }
2328 
2329   /// See AbstractAttribute::trackStatistics()
2330   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2331 };
2332 
2333 /// -------------------AAReachability Attribute--------------------------
2334 
2335 struct AAReachabilityImpl : AAReachability {
2336   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2337       : AAReachability(IRP, A) {}
2338 
2339   const std::string getAsStr() const override {
2340     // TODO: Return the number of reachable queries.
2341     return "reachable";
2342   }
2343 
2344   /// See AbstractAttribute::initialize(...).
2345   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2346 
2347   /// See AbstractAttribute::updateImpl(...).
2348   ChangeStatus updateImpl(Attributor &A) override {
2349     return indicatePessimisticFixpoint();
2350   }
2351 };
2352 
2353 struct AAReachabilityFunction final : public AAReachabilityImpl {
2354   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2355       : AAReachabilityImpl(IRP, A) {}
2356 
2357   /// See AbstractAttribute::trackStatistics()
2358   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2359 };
2360 
2361 /// ------------------------ NoAlias Argument Attribute ------------------------
2362 
2363 struct AANoAliasImpl : AANoAlias {
2364   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2365     assert(getAssociatedType()->isPointerTy() &&
2366            "Noalias is a pointer attribute");
2367   }
2368 
2369   const std::string getAsStr() const override {
2370     return getAssumed() ? "noalias" : "may-alias";
2371   }
2372 };
2373 
2374 /// NoAlias attribute for a floating value.
2375 struct AANoAliasFloating final : AANoAliasImpl {
2376   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2377       : AANoAliasImpl(IRP, A) {}
2378 
2379   /// See AbstractAttribute::initialize(...).
2380   void initialize(Attributor &A) override {
2381     AANoAliasImpl::initialize(A);
2382     Value *Val = &getAssociatedValue();
2383     do {
2384       CastInst *CI = dyn_cast<CastInst>(Val);
2385       if (!CI)
2386         break;
2387       Value *Base = CI->getOperand(0);
2388       if (!Base->hasOneUse())
2389         break;
2390       Val = Base;
2391     } while (true);
2392 
2393     if (!Val->getType()->isPointerTy()) {
2394       indicatePessimisticFixpoint();
2395       return;
2396     }
2397 
2398     if (isa<AllocaInst>(Val))
2399       indicateOptimisticFixpoint();
2400     else if (isa<ConstantPointerNull>(Val) &&
2401              !NullPointerIsDefined(getAnchorScope(),
2402                                    Val->getType()->getPointerAddressSpace()))
2403       indicateOptimisticFixpoint();
2404     else if (Val != &getAssociatedValue()) {
2405       const auto &ValNoAliasAA =
2406           A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
2407       if (ValNoAliasAA.isKnownNoAlias())
2408         indicateOptimisticFixpoint();
2409     }
2410   }
2411 
2412   /// See AbstractAttribute::updateImpl(...).
2413   ChangeStatus updateImpl(Attributor &A) override {
2414     // TODO: Implement this.
2415     return indicatePessimisticFixpoint();
2416   }
2417 
2418   /// See AbstractAttribute::trackStatistics()
2419   void trackStatistics() const override {
2420     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2421   }
2422 };
2423 
2424 /// NoAlias attribute for an argument.
2425 struct AANoAliasArgument final
2426     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2427   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2428   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2429 
2430   /// See AbstractAttribute::initialize(...).
2431   void initialize(Attributor &A) override {
2432     Base::initialize(A);
2433     // See callsite argument attribute and callee argument attribute.
2434     if (hasAttr({Attribute::ByVal}))
2435       indicateOptimisticFixpoint();
2436   }
2437 
2438   /// See AbstractAttribute::update(...).
2439   ChangeStatus updateImpl(Attributor &A) override {
2440     // We have to make sure no-alias on the argument does not break
2441     // synchronization when this is a callback argument, see also [1] below.
2442     // If synchronization cannot be affected, we delegate to the base updateImpl
2443     // function, otherwise we give up for now.
2444 
2445     // If the function is no-sync, no-alias cannot break synchronization.
2446     const auto &NoSyncAA = A.getAAFor<AANoSync>(
2447         *this, IRPosition::function_scope(getIRPosition()));
2448     if (NoSyncAA.isAssumedNoSync())
2449       return Base::updateImpl(A);
2450 
2451     // If the argument is read-only, no-alias cannot break synchronization.
2452     const auto &MemBehaviorAA =
2453         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
2454     if (MemBehaviorAA.isAssumedReadOnly())
2455       return Base::updateImpl(A);
2456 
2457     // If the argument is never passed through callbacks, no-alias cannot break
2458     // synchronization.
2459     bool AllCallSitesKnown;
2460     if (A.checkForAllCallSites(
2461             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2462             true, AllCallSitesKnown))
2463       return Base::updateImpl(A);
2464 
2465     // TODO: add no-alias but make sure it doesn't break synchronization by
2466     // introducing fake uses. See:
2467     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2468     //     International Workshop on OpenMP 2018,
2469     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2470 
2471     return indicatePessimisticFixpoint();
2472   }
2473 
2474   /// See AbstractAttribute::trackStatistics()
2475   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2476 };
2477 
2478 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2479   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2480       : AANoAliasImpl(IRP, A) {}
2481 
2482   /// See AbstractAttribute::initialize(...).
2483   void initialize(Attributor &A) override {
2484     // See callsite argument attribute and callee argument attribute.
2485     const auto &CB = cast<CallBase>(getAnchorValue());
2486     if (CB.paramHasAttr(getArgNo(), Attribute::NoAlias))
2487       indicateOptimisticFixpoint();
2488     Value &Val = getAssociatedValue();
2489     if (isa<ConstantPointerNull>(Val) &&
2490         !NullPointerIsDefined(getAnchorScope(),
2491                               Val.getType()->getPointerAddressSpace()))
2492       indicateOptimisticFixpoint();
2493   }
2494 
2495   /// Determine if the underlying value may alias with the call site argument
2496   /// \p OtherArgNo of \p ICS (= the underlying call site).
2497   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2498                             const AAMemoryBehavior &MemBehaviorAA,
2499                             const CallBase &CB, unsigned OtherArgNo) {
2500     // We do not need to worry about aliasing with the underlying IRP.
2501     if (this->getArgNo() == (int)OtherArgNo)
2502       return false;
2503 
2504     // If it is not a pointer or pointer vector we do not alias.
2505     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2506     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2507       return false;
2508 
2509     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2510         *this, IRPosition::callsite_argument(CB, OtherArgNo),
2511         /* TrackDependence */ false);
2512 
2513     // If the argument is readnone, there is no read-write aliasing.
2514     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2515       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2516       return false;
2517     }
2518 
2519     // If the argument is readonly and the underlying value is readonly, there
2520     // is no read-write aliasing.
2521     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2522     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2523       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2524       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2525       return false;
2526     }
2527 
2528     // We have to utilize actual alias analysis queries so we need the object.
2529     if (!AAR)
2530       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2531 
2532     // Try to rule it out at the call site.
2533     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2534     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2535                          "callsite arguments: "
2536                       << getAssociatedValue() << " " << *ArgOp << " => "
2537                       << (IsAliasing ? "" : "no-") << "alias \n");
2538 
2539     return IsAliasing;
2540   }
2541 
2542   bool
2543   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2544                                          const AAMemoryBehavior &MemBehaviorAA,
2545                                          const AANoAlias &NoAliasAA) {
2546     // We can deduce "noalias" if the following conditions hold.
2547     // (i)   Associated value is assumed to be noalias in the definition.
2548     // (ii)  Associated value is assumed to be no-capture in all the uses
2549     //       possibly executed before this callsite.
2550     // (iii) There is no other pointer argument which could alias with the
2551     //       value.
2552 
2553     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2554     if (!AssociatedValueIsNoAliasAtDef) {
2555       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2556                         << " is not no-alias at the definition\n");
2557       return false;
2558     }
2559 
2560     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2561 
2562     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2563     auto &NoCaptureAA =
2564         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2565     // Check whether the value is captured in the scope using AANoCapture.
2566     //      Look at CFG and check only uses possibly executed before this
2567     //      callsite.
2568     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2569       Instruction *UserI = cast<Instruction>(U.getUser());
2570 
2571       // If user if curr instr and only use.
2572       if (UserI == getCtxI() && UserI->hasOneUse())
2573         return true;
2574 
2575       const Function *ScopeFn = VIRP.getAnchorScope();
2576       if (ScopeFn) {
2577         const auto &ReachabilityAA =
2578             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2579 
2580         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2581           return true;
2582 
2583         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2584           if (CB->isArgOperand(&U)) {
2585 
2586             unsigned ArgNo = CB->getArgOperandNo(&U);
2587 
2588             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2589                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2590 
2591             if (NoCaptureAA.isAssumedNoCapture())
2592               return true;
2593           }
2594         }
2595       }
2596 
2597       // For cases which can potentially have more users
2598       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2599           isa<SelectInst>(U)) {
2600         Follow = true;
2601         return true;
2602       }
2603 
2604       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2605       return false;
2606     };
2607 
2608     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2609       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2610         LLVM_DEBUG(
2611             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2612                    << " cannot be noalias as it is potentially captured\n");
2613         return false;
2614       }
2615     }
2616     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2617 
2618     // Check there is no other pointer argument which could alias with the
2619     // value passed at this call site.
2620     // TODO: AbstractCallSite
2621     const auto &CB = cast<CallBase>(getAnchorValue());
2622     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2623          OtherArgNo++)
2624       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2625         return false;
2626 
2627     return true;
2628   }
2629 
2630   /// See AbstractAttribute::updateImpl(...).
2631   ChangeStatus updateImpl(Attributor &A) override {
2632     // If the argument is readnone we are done as there are no accesses via the
2633     // argument.
2634     auto &MemBehaviorAA =
2635         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2636                                      /* TrackDependence */ false);
2637     if (MemBehaviorAA.isAssumedReadNone()) {
2638       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2639       return ChangeStatus::UNCHANGED;
2640     }
2641 
2642     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2643     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2644                                                   /* TrackDependence */ false);
2645 
2646     AAResults *AAR = nullptr;
2647     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2648                                                NoAliasAA)) {
2649       LLVM_DEBUG(
2650           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2651       return ChangeStatus::UNCHANGED;
2652     }
2653 
2654     return indicatePessimisticFixpoint();
2655   }
2656 
2657   /// See AbstractAttribute::trackStatistics()
2658   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2659 };
2660 
2661 /// NoAlias attribute for function return value.
2662 struct AANoAliasReturned final : AANoAliasImpl {
2663   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2664       : AANoAliasImpl(IRP, A) {}
2665 
2666   /// See AbstractAttribute::updateImpl(...).
2667   virtual ChangeStatus updateImpl(Attributor &A) override {
2668 
2669     auto CheckReturnValue = [&](Value &RV) -> bool {
2670       if (Constant *C = dyn_cast<Constant>(&RV))
2671         if (C->isNullValue() || isa<UndefValue>(C))
2672           return true;
2673 
2674       /// For now, we can only deduce noalias if we have call sites.
2675       /// FIXME: add more support.
2676       if (!isa<CallBase>(&RV))
2677         return false;
2678 
2679       const IRPosition &RVPos = IRPosition::value(RV);
2680       const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
2681       if (!NoAliasAA.isAssumedNoAlias())
2682         return false;
2683 
2684       const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
2685       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2686     };
2687 
2688     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2689       return indicatePessimisticFixpoint();
2690 
2691     return ChangeStatus::UNCHANGED;
2692   }
2693 
2694   /// See AbstractAttribute::trackStatistics()
2695   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2696 };
2697 
2698 /// NoAlias attribute deduction for a call site return value.
2699 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2700   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2701       : AANoAliasImpl(IRP, A) {}
2702 
2703   /// See AbstractAttribute::initialize(...).
2704   void initialize(Attributor &A) override {
2705     AANoAliasImpl::initialize(A);
2706     Function *F = getAssociatedFunction();
2707     if (!F)
2708       indicatePessimisticFixpoint();
2709   }
2710 
2711   /// See AbstractAttribute::updateImpl(...).
2712   ChangeStatus updateImpl(Attributor &A) override {
2713     // TODO: Once we have call site specific value information we can provide
2714     //       call site specific liveness information and then it makes
2715     //       sense to specialize attributes for call sites arguments instead of
2716     //       redirecting requests to the callee argument.
2717     Function *F = getAssociatedFunction();
2718     const IRPosition &FnPos = IRPosition::returned(*F);
2719     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
2720     return clampStateAndIndicateChange(
2721         getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
2722   }
2723 
2724   /// See AbstractAttribute::trackStatistics()
2725   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2726 };
2727 
2728 /// -------------------AAIsDead Function Attribute-----------------------
2729 
2730 struct AAIsDeadValueImpl : public AAIsDead {
2731   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2732 
2733   /// See AAIsDead::isAssumedDead().
2734   bool isAssumedDead() const override { return getAssumed(); }
2735 
2736   /// See AAIsDead::isKnownDead().
2737   bool isKnownDead() const override { return getKnown(); }
2738 
2739   /// See AAIsDead::isAssumedDead(BasicBlock *).
2740   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2741 
2742   /// See AAIsDead::isKnownDead(BasicBlock *).
2743   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2744 
2745   /// See AAIsDead::isAssumedDead(Instruction *I).
2746   bool isAssumedDead(const Instruction *I) const override {
2747     return I == getCtxI() && isAssumedDead();
2748   }
2749 
2750   /// See AAIsDead::isKnownDead(Instruction *I).
2751   bool isKnownDead(const Instruction *I) const override {
2752     return isAssumedDead(I) && getKnown();
2753   }
2754 
2755   /// See AbstractAttribute::getAsStr().
2756   const std::string getAsStr() const override {
2757     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2758   }
2759 
2760   /// Check if all uses are assumed dead.
2761   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2762     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2763     // Explicitly set the dependence class to required because we want a long
2764     // chain of N dependent instructions to be considered live as soon as one is
2765     // without going through N update cycles. This is not required for
2766     // correctness.
2767     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2768   }
2769 
2770   /// Determine if \p I is assumed to be side-effect free.
2771   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2772     if (!I || wouldInstructionBeTriviallyDead(I))
2773       return true;
2774 
2775     auto *CB = dyn_cast<CallBase>(I);
2776     if (!CB || isa<IntrinsicInst>(CB))
2777       return false;
2778 
2779     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2780     const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>(
2781         *this, CallIRP, /* TrackDependence */ false);
2782     if (!NoUnwindAA.isAssumedNoUnwind())
2783       return false;
2784     if (!NoUnwindAA.isKnownNoUnwind())
2785       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2786 
2787     const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>(
2788         *this, CallIRP, /* TrackDependence */ false);
2789     if (MemBehaviorAA.isAssumedReadOnly()) {
2790       if (!MemBehaviorAA.isKnownReadOnly())
2791         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2792       return true;
2793     }
2794     return false;
2795   }
2796 };
2797 
2798 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2799   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2800       : AAIsDeadValueImpl(IRP, A) {}
2801 
2802   /// See AbstractAttribute::initialize(...).
2803   void initialize(Attributor &A) override {
2804     if (isa<UndefValue>(getAssociatedValue())) {
2805       indicatePessimisticFixpoint();
2806       return;
2807     }
2808 
2809     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2810     if (!isAssumedSideEffectFree(A, I))
2811       indicatePessimisticFixpoint();
2812   }
2813 
2814   /// See AbstractAttribute::updateImpl(...).
2815   ChangeStatus updateImpl(Attributor &A) override {
2816     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2817     if (!isAssumedSideEffectFree(A, I))
2818       return indicatePessimisticFixpoint();
2819 
2820     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2821       return indicatePessimisticFixpoint();
2822     return ChangeStatus::UNCHANGED;
2823   }
2824 
2825   /// See AbstractAttribute::manifest(...).
2826   ChangeStatus manifest(Attributor &A) override {
2827     Value &V = getAssociatedValue();
2828     if (auto *I = dyn_cast<Instruction>(&V)) {
2829       // If we get here we basically know the users are all dead. We check if
2830       // isAssumedSideEffectFree returns true here again because it might not be
2831       // the case and only the users are dead but the instruction (=call) is
2832       // still needed.
2833       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2834         A.deleteAfterManifest(*I);
2835         return ChangeStatus::CHANGED;
2836       }
2837     }
2838     if (V.use_empty())
2839       return ChangeStatus::UNCHANGED;
2840 
2841     bool UsedAssumedInformation = false;
2842     Optional<Constant *> C =
2843         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2844     if (C.hasValue() && C.getValue())
2845       return ChangeStatus::UNCHANGED;
2846 
2847     // Replace the value with undef as it is dead but keep droppable uses around
2848     // as they provide information we don't want to give up on just yet.
2849     UndefValue &UV = *UndefValue::get(V.getType());
2850     bool AnyChange =
2851         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2852     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2853   }
2854 
2855   /// See AbstractAttribute::trackStatistics()
2856   void trackStatistics() const override {
2857     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2858   }
2859 };
2860 
2861 struct AAIsDeadArgument : public AAIsDeadFloating {
2862   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2863       : AAIsDeadFloating(IRP, A) {}
2864 
2865   /// See AbstractAttribute::initialize(...).
2866   void initialize(Attributor &A) override {
2867     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2868       indicatePessimisticFixpoint();
2869   }
2870 
2871   /// See AbstractAttribute::manifest(...).
2872   ChangeStatus manifest(Attributor &A) override {
2873     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2874     Argument &Arg = *getAssociatedArgument();
2875     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2876       if (A.registerFunctionSignatureRewrite(
2877               Arg, /* ReplacementTypes */ {},
2878               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2879               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2880         Arg.dropDroppableUses();
2881         return ChangeStatus::CHANGED;
2882       }
2883     return Changed;
2884   }
2885 
2886   /// See AbstractAttribute::trackStatistics()
2887   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2888 };
2889 
2890 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2891   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2892       : AAIsDeadValueImpl(IRP, A) {}
2893 
2894   /// See AbstractAttribute::initialize(...).
2895   void initialize(Attributor &A) override {
2896     if (isa<UndefValue>(getAssociatedValue()))
2897       indicatePessimisticFixpoint();
2898   }
2899 
2900   /// See AbstractAttribute::updateImpl(...).
2901   ChangeStatus updateImpl(Attributor &A) override {
2902     // TODO: Once we have call site specific value information we can provide
2903     //       call site specific liveness information and then it makes
2904     //       sense to specialize attributes for call sites arguments instead of
2905     //       redirecting requests to the callee argument.
2906     Argument *Arg = getAssociatedArgument();
2907     if (!Arg)
2908       return indicatePessimisticFixpoint();
2909     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2910     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
2911     return clampStateAndIndicateChange(
2912         getState(), static_cast<const AAIsDead::StateType &>(ArgAA.getState()));
2913   }
2914 
2915   /// See AbstractAttribute::manifest(...).
2916   ChangeStatus manifest(Attributor &A) override {
2917     CallBase &CB = cast<CallBase>(getAnchorValue());
2918     Use &U = CB.getArgOperandUse(getArgNo());
2919     assert(!isa<UndefValue>(U.get()) &&
2920            "Expected undef values to be filtered out!");
2921     UndefValue &UV = *UndefValue::get(U->getType());
2922     if (A.changeUseAfterManifest(U, UV))
2923       return ChangeStatus::CHANGED;
2924     return ChangeStatus::UNCHANGED;
2925   }
2926 
2927   /// See AbstractAttribute::trackStatistics()
2928   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2929 };
2930 
2931 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2932   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2933       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2934 
2935   /// See AAIsDead::isAssumedDead().
2936   bool isAssumedDead() const override {
2937     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
2938   }
2939 
2940   /// See AbstractAttribute::initialize(...).
2941   void initialize(Attributor &A) override {
2942     if (isa<UndefValue>(getAssociatedValue())) {
2943       indicatePessimisticFixpoint();
2944       return;
2945     }
2946 
2947     // We track this separately as a secondary state.
2948     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
2949   }
2950 
2951   /// See AbstractAttribute::updateImpl(...).
2952   ChangeStatus updateImpl(Attributor &A) override {
2953     ChangeStatus Changed = ChangeStatus::UNCHANGED;
2954     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
2955       IsAssumedSideEffectFree = false;
2956       Changed = ChangeStatus::CHANGED;
2957     }
2958 
2959     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2960       return indicatePessimisticFixpoint();
2961     return Changed;
2962   }
2963 
2964   /// See AbstractAttribute::trackStatistics()
2965   void trackStatistics() const override {
2966     if (IsAssumedSideEffectFree)
2967       STATS_DECLTRACK_CSRET_ATTR(IsDead)
2968     else
2969       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
2970   }
2971 
2972   /// See AbstractAttribute::getAsStr().
2973   const std::string getAsStr() const override {
2974     return isAssumedDead()
2975                ? "assumed-dead"
2976                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
2977   }
2978 
2979 private:
2980   bool IsAssumedSideEffectFree;
2981 };
2982 
2983 struct AAIsDeadReturned : public AAIsDeadValueImpl {
2984   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
2985       : AAIsDeadValueImpl(IRP, A) {}
2986 
2987   /// See AbstractAttribute::updateImpl(...).
2988   ChangeStatus updateImpl(Attributor &A) override {
2989 
2990     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
2991                               {Instruction::Ret});
2992 
2993     auto PredForCallSite = [&](AbstractCallSite ACS) {
2994       if (ACS.isCallbackCall() || !ACS.getInstruction())
2995         return false;
2996       return areAllUsesAssumedDead(A, *ACS.getInstruction());
2997     };
2998 
2999     bool AllCallSitesKnown;
3000     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3001                                 AllCallSitesKnown))
3002       return indicatePessimisticFixpoint();
3003 
3004     return ChangeStatus::UNCHANGED;
3005   }
3006 
3007   /// See AbstractAttribute::manifest(...).
3008   ChangeStatus manifest(Attributor &A) override {
3009     // TODO: Rewrite the signature to return void?
3010     bool AnyChange = false;
3011     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3012     auto RetInstPred = [&](Instruction &I) {
3013       ReturnInst &RI = cast<ReturnInst>(I);
3014       if (!isa<UndefValue>(RI.getReturnValue()))
3015         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3016       return true;
3017     };
3018     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3019     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3020   }
3021 
3022   /// See AbstractAttribute::trackStatistics()
3023   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3024 };
3025 
3026 struct AAIsDeadFunction : public AAIsDead {
3027   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3028 
3029   /// See AbstractAttribute::initialize(...).
3030   void initialize(Attributor &A) override {
3031     const Function *F = getAnchorScope();
3032     if (F && !F->isDeclaration()) {
3033       ToBeExploredFrom.insert(&F->getEntryBlock().front());
3034       assumeLive(A, F->getEntryBlock());
3035     }
3036   }
3037 
3038   /// See AbstractAttribute::getAsStr().
3039   const std::string getAsStr() const override {
3040     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3041            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3042            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3043            std::to_string(KnownDeadEnds.size()) + "]";
3044   }
3045 
3046   /// See AbstractAttribute::manifest(...).
3047   ChangeStatus manifest(Attributor &A) override {
3048     assert(getState().isValidState() &&
3049            "Attempted to manifest an invalid state!");
3050 
3051     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3052     Function &F = *getAnchorScope();
3053 
3054     if (AssumedLiveBlocks.empty()) {
3055       A.deleteAfterManifest(F);
3056       return ChangeStatus::CHANGED;
3057     }
3058 
3059     // Flag to determine if we can change an invoke to a call assuming the
3060     // callee is nounwind. This is not possible if the personality of the
3061     // function allows to catch asynchronous exceptions.
3062     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3063 
3064     KnownDeadEnds.set_union(ToBeExploredFrom);
3065     for (const Instruction *DeadEndI : KnownDeadEnds) {
3066       auto *CB = dyn_cast<CallBase>(DeadEndI);
3067       if (!CB)
3068         continue;
3069       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3070           *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true,
3071           DepClassTy::OPTIONAL);
3072       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3073       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3074         continue;
3075 
3076       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3077         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3078       else
3079         A.changeToUnreachableAfterManifest(
3080             const_cast<Instruction *>(DeadEndI->getNextNode()));
3081       HasChanged = ChangeStatus::CHANGED;
3082     }
3083 
3084     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3085     for (BasicBlock &BB : F)
3086       if (!AssumedLiveBlocks.count(&BB)) {
3087         A.deleteAfterManifest(BB);
3088         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3089       }
3090 
3091     return HasChanged;
3092   }
3093 
3094   /// See AbstractAttribute::updateImpl(...).
3095   ChangeStatus updateImpl(Attributor &A) override;
3096 
3097   /// See AbstractAttribute::trackStatistics()
3098   void trackStatistics() const override {}
3099 
3100   /// Returns true if the function is assumed dead.
3101   bool isAssumedDead() const override { return false; }
3102 
3103   /// See AAIsDead::isKnownDead().
3104   bool isKnownDead() const override { return false; }
3105 
3106   /// See AAIsDead::isAssumedDead(BasicBlock *).
3107   bool isAssumedDead(const BasicBlock *BB) const override {
3108     assert(BB->getParent() == getAnchorScope() &&
3109            "BB must be in the same anchor scope function.");
3110 
3111     if (!getAssumed())
3112       return false;
3113     return !AssumedLiveBlocks.count(BB);
3114   }
3115 
3116   /// See AAIsDead::isKnownDead(BasicBlock *).
3117   bool isKnownDead(const BasicBlock *BB) const override {
3118     return getKnown() && isAssumedDead(BB);
3119   }
3120 
3121   /// See AAIsDead::isAssumed(Instruction *I).
3122   bool isAssumedDead(const Instruction *I) const override {
3123     assert(I->getParent()->getParent() == getAnchorScope() &&
3124            "Instruction must be in the same anchor scope function.");
3125 
3126     if (!getAssumed())
3127       return false;
3128 
3129     // If it is not in AssumedLiveBlocks then it for sure dead.
3130     // Otherwise, it can still be after noreturn call in a live block.
3131     if (!AssumedLiveBlocks.count(I->getParent()))
3132       return true;
3133 
3134     // If it is not after a liveness barrier it is live.
3135     const Instruction *PrevI = I->getPrevNode();
3136     while (PrevI) {
3137       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3138         return true;
3139       PrevI = PrevI->getPrevNode();
3140     }
3141     return false;
3142   }
3143 
3144   /// See AAIsDead::isKnownDead(Instruction *I).
3145   bool isKnownDead(const Instruction *I) const override {
3146     return getKnown() && isAssumedDead(I);
3147   }
3148 
3149   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3150   /// that internal function called from \p BB should now be looked at.
3151   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3152     if (!AssumedLiveBlocks.insert(&BB).second)
3153       return false;
3154 
3155     // We assume that all of BB is (probably) live now and if there are calls to
3156     // internal functions we will assume that those are now live as well. This
3157     // is a performance optimization for blocks with calls to a lot of internal
3158     // functions. It can however cause dead functions to be treated as live.
3159     for (const Instruction &I : BB)
3160       if (const auto *CB = dyn_cast<CallBase>(&I))
3161         if (const Function *F = CB->getCalledFunction())
3162           if (F->hasLocalLinkage())
3163             A.markLiveInternalFunction(*F);
3164     return true;
3165   }
3166 
3167   /// Collection of instructions that need to be explored again, e.g., we
3168   /// did assume they do not transfer control to (one of their) successors.
3169   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3170 
3171   /// Collection of instructions that are known to not transfer control.
3172   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3173 
3174   /// Collection of all assumed live BasicBlocks.
3175   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3176 };
3177 
3178 static bool
3179 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3180                         AbstractAttribute &AA,
3181                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3182   const IRPosition &IPos = IRPosition::callsite_function(CB);
3183 
3184   const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3185       AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3186   if (NoReturnAA.isAssumedNoReturn())
3187     return !NoReturnAA.isKnownNoReturn();
3188   if (CB.isTerminator())
3189     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3190   else
3191     AliveSuccessors.push_back(CB.getNextNode());
3192   return false;
3193 }
3194 
3195 static bool
3196 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3197                         AbstractAttribute &AA,
3198                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3199   bool UsedAssumedInformation =
3200       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3201 
3202   // First, determine if we can change an invoke to a call assuming the
3203   // callee is nounwind. This is not possible if the personality of the
3204   // function allows to catch asynchronous exceptions.
3205   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3206     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3207   } else {
3208     const IRPosition &IPos = IRPosition::callsite_function(II);
3209     const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>(
3210         AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
3211     if (AANoUnw.isAssumedNoUnwind()) {
3212       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3213     } else {
3214       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3215     }
3216   }
3217   return UsedAssumedInformation;
3218 }
3219 
3220 static bool
3221 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3222                         AbstractAttribute &AA,
3223                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3224   bool UsedAssumedInformation = false;
3225   if (BI.getNumSuccessors() == 1) {
3226     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3227   } else {
3228     Optional<ConstantInt *> CI = getAssumedConstantInt(
3229         A, *BI.getCondition(), AA, UsedAssumedInformation);
3230     if (!CI.hasValue()) {
3231       // No value yet, assume both edges are dead.
3232     } else if (CI.getValue()) {
3233       const BasicBlock *SuccBB =
3234           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3235       AliveSuccessors.push_back(&SuccBB->front());
3236     } else {
3237       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3238       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3239       UsedAssumedInformation = false;
3240     }
3241   }
3242   return UsedAssumedInformation;
3243 }
3244 
3245 static bool
3246 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3247                         AbstractAttribute &AA,
3248                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3249   bool UsedAssumedInformation = false;
3250   Optional<ConstantInt *> CI =
3251       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3252   if (!CI.hasValue()) {
3253     // No value yet, assume all edges are dead.
3254   } else if (CI.getValue()) {
3255     for (auto &CaseIt : SI.cases()) {
3256       if (CaseIt.getCaseValue() == CI.getValue()) {
3257         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3258         return UsedAssumedInformation;
3259       }
3260     }
3261     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3262     return UsedAssumedInformation;
3263   } else {
3264     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3265       AliveSuccessors.push_back(&SuccBB->front());
3266   }
3267   return UsedAssumedInformation;
3268 }
3269 
3270 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3271   ChangeStatus Change = ChangeStatus::UNCHANGED;
3272 
3273   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3274                     << getAnchorScope()->size() << "] BBs and "
3275                     << ToBeExploredFrom.size() << " exploration points and "
3276                     << KnownDeadEnds.size() << " known dead ends\n");
3277 
3278   // Copy and clear the list of instructions we need to explore from. It is
3279   // refilled with instructions the next update has to look at.
3280   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3281                                                ToBeExploredFrom.end());
3282   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3283 
3284   SmallVector<const Instruction *, 8> AliveSuccessors;
3285   while (!Worklist.empty()) {
3286     const Instruction *I = Worklist.pop_back_val();
3287     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3288 
3289     AliveSuccessors.clear();
3290 
3291     bool UsedAssumedInformation = false;
3292     switch (I->getOpcode()) {
3293     // TODO: look for (assumed) UB to backwards propagate "deadness".
3294     default:
3295       if (I->isTerminator()) {
3296         for (const BasicBlock *SuccBB : successors(I->getParent()))
3297           AliveSuccessors.push_back(&SuccBB->front());
3298       } else {
3299         AliveSuccessors.push_back(I->getNextNode());
3300       }
3301       break;
3302     case Instruction::Call:
3303       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3304                                                        *this, AliveSuccessors);
3305       break;
3306     case Instruction::Invoke:
3307       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3308                                                        *this, AliveSuccessors);
3309       break;
3310     case Instruction::Br:
3311       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3312                                                        *this, AliveSuccessors);
3313       break;
3314     case Instruction::Switch:
3315       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3316                                                        *this, AliveSuccessors);
3317       break;
3318     }
3319 
3320     if (UsedAssumedInformation) {
3321       NewToBeExploredFrom.insert(I);
3322     } else {
3323       Change = ChangeStatus::CHANGED;
3324       if (AliveSuccessors.empty() ||
3325           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3326         KnownDeadEnds.insert(I);
3327     }
3328 
3329     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3330                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3331                       << UsedAssumedInformation << "\n");
3332 
3333     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3334       if (!I->isTerminator()) {
3335         assert(AliveSuccessors.size() == 1 &&
3336                "Non-terminator expected to have a single successor!");
3337         Worklist.push_back(AliveSuccessor);
3338       } else {
3339         if (assumeLive(A, *AliveSuccessor->getParent()))
3340           Worklist.push_back(AliveSuccessor);
3341       }
3342     }
3343   }
3344 
3345   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3346 
3347   // If we know everything is live there is no need to query for liveness.
3348   // Instead, indicating a pessimistic fixpoint will cause the state to be
3349   // "invalid" and all queries to be answered conservatively without lookups.
3350   // To be in this state we have to (1) finished the exploration and (3) not
3351   // discovered any non-trivial dead end and (2) not ruled unreachable code
3352   // dead.
3353   if (ToBeExploredFrom.empty() &&
3354       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3355       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3356         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3357       }))
3358     return indicatePessimisticFixpoint();
3359   return Change;
3360 }
3361 
3362 /// Liveness information for a call sites.
3363 struct AAIsDeadCallSite final : AAIsDeadFunction {
3364   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3365       : AAIsDeadFunction(IRP, A) {}
3366 
3367   /// See AbstractAttribute::initialize(...).
3368   void initialize(Attributor &A) override {
3369     // TODO: Once we have call site specific value information we can provide
3370     //       call site specific liveness information and then it makes
3371     //       sense to specialize attributes for call sites instead of
3372     //       redirecting requests to the callee.
3373     llvm_unreachable("Abstract attributes for liveness are not "
3374                      "supported for call sites yet!");
3375   }
3376 
3377   /// See AbstractAttribute::updateImpl(...).
3378   ChangeStatus updateImpl(Attributor &A) override {
3379     return indicatePessimisticFixpoint();
3380   }
3381 
3382   /// See AbstractAttribute::trackStatistics()
3383   void trackStatistics() const override {}
3384 };
3385 
3386 /// -------------------- Dereferenceable Argument Attribute --------------------
3387 
3388 template <>
3389 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3390                                                      const DerefState &R) {
3391   ChangeStatus CS0 =
3392       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3393   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3394   return CS0 | CS1;
3395 }
3396 
3397 struct AADereferenceableImpl : AADereferenceable {
3398   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3399       : AADereferenceable(IRP, A) {}
3400   using StateType = DerefState;
3401 
3402   /// See AbstractAttribute::initialize(...).
3403   void initialize(Attributor &A) override {
3404     SmallVector<Attribute, 4> Attrs;
3405     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3406              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3407     for (const Attribute &Attr : Attrs)
3408       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3409 
3410     const IRPosition &IRP = this->getIRPosition();
3411     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP,
3412                                        /* TrackDependence */ false);
3413 
3414     bool CanBeNull;
3415     takeKnownDerefBytesMaximum(
3416         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3417             A.getDataLayout(), CanBeNull));
3418 
3419     bool IsFnInterface = IRP.isFnInterfaceKind();
3420     Function *FnScope = IRP.getAnchorScope();
3421     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3422       indicatePessimisticFixpoint();
3423       return;
3424     }
3425 
3426     if (Instruction *CtxI = getCtxI())
3427       followUsesInMBEC(*this, A, getState(), *CtxI);
3428   }
3429 
3430   /// See AbstractAttribute::getState()
3431   /// {
3432   StateType &getState() override { return *this; }
3433   const StateType &getState() const override { return *this; }
3434   /// }
3435 
3436   /// Helper function for collecting accessed bytes in must-be-executed-context
3437   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3438                               DerefState &State) {
3439     const Value *UseV = U->get();
3440     if (!UseV->getType()->isPointerTy())
3441       return;
3442 
3443     Type *PtrTy = UseV->getType();
3444     const DataLayout &DL = A.getDataLayout();
3445     int64_t Offset;
3446     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3447             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3448       if (Base == &getAssociatedValue() &&
3449           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3450         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3451         State.addAccessedBytes(Offset, Size);
3452       }
3453     }
3454     return;
3455   }
3456 
3457   /// See followUsesInMBEC
3458   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3459                        AADereferenceable::StateType &State) {
3460     bool IsNonNull = false;
3461     bool TrackUse = false;
3462     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3463         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3464     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3465                       << " for instruction " << *I << "\n");
3466 
3467     addAccessedBytesForUse(A, U, I, State);
3468     State.takeKnownDerefBytesMaximum(DerefBytes);
3469     return TrackUse;
3470   }
3471 
3472   /// See AbstractAttribute::manifest(...).
3473   ChangeStatus manifest(Attributor &A) override {
3474     ChangeStatus Change = AADereferenceable::manifest(A);
3475     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3476       removeAttrs({Attribute::DereferenceableOrNull});
3477       return ChangeStatus::CHANGED;
3478     }
3479     return Change;
3480   }
3481 
3482   void getDeducedAttributes(LLVMContext &Ctx,
3483                             SmallVectorImpl<Attribute> &Attrs) const override {
3484     // TODO: Add *_globally support
3485     if (isAssumedNonNull())
3486       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3487           Ctx, getAssumedDereferenceableBytes()));
3488     else
3489       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3490           Ctx, getAssumedDereferenceableBytes()));
3491   }
3492 
3493   /// See AbstractAttribute::getAsStr().
3494   const std::string getAsStr() const override {
3495     if (!getAssumedDereferenceableBytes())
3496       return "unknown-dereferenceable";
3497     return std::string("dereferenceable") +
3498            (isAssumedNonNull() ? "" : "_or_null") +
3499            (isAssumedGlobal() ? "_globally" : "") + "<" +
3500            std::to_string(getKnownDereferenceableBytes()) + "-" +
3501            std::to_string(getAssumedDereferenceableBytes()) + ">";
3502   }
3503 };
3504 
3505 /// Dereferenceable attribute for a floating value.
3506 struct AADereferenceableFloating : AADereferenceableImpl {
3507   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3508       : AADereferenceableImpl(IRP, A) {}
3509 
3510   /// See AbstractAttribute::updateImpl(...).
3511   ChangeStatus updateImpl(Attributor &A) override {
3512     const DataLayout &DL = A.getDataLayout();
3513 
3514     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3515                             bool Stripped) -> bool {
3516       unsigned IdxWidth =
3517           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3518       APInt Offset(IdxWidth, 0);
3519       const Value *Base =
3520           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3521 
3522       const auto &AA =
3523           A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
3524       int64_t DerefBytes = 0;
3525       if (!Stripped && this == &AA) {
3526         // Use IR information if we did not strip anything.
3527         // TODO: track globally.
3528         bool CanBeNull;
3529         DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
3530         T.GlobalState.indicatePessimisticFixpoint();
3531       } else {
3532         const DerefState &DS = static_cast<const DerefState &>(AA.getState());
3533         DerefBytes = DS.DerefBytesState.getAssumed();
3534         T.GlobalState &= DS.GlobalState;
3535       }
3536 
3537       // For now we do not try to "increase" dereferenceability due to negative
3538       // indices as we first have to come up with code to deal with loops and
3539       // for overflows of the dereferenceable bytes.
3540       int64_t OffsetSExt = Offset.getSExtValue();
3541       if (OffsetSExt < 0)
3542         OffsetSExt = 0;
3543 
3544       T.takeAssumedDerefBytesMinimum(
3545           std::max(int64_t(0), DerefBytes - OffsetSExt));
3546 
3547       if (this == &AA) {
3548         if (!Stripped) {
3549           // If nothing was stripped IR information is all we got.
3550           T.takeKnownDerefBytesMaximum(
3551               std::max(int64_t(0), DerefBytes - OffsetSExt));
3552           T.indicatePessimisticFixpoint();
3553         } else if (OffsetSExt > 0) {
3554           // If something was stripped but there is circular reasoning we look
3555           // for the offset. If it is positive we basically decrease the
3556           // dereferenceable bytes in a circluar loop now, which will simply
3557           // drive them down to the known value in a very slow way which we
3558           // can accelerate.
3559           T.indicatePessimisticFixpoint();
3560         }
3561       }
3562 
3563       return T.isValidState();
3564     };
3565 
3566     DerefState T;
3567     if (!genericValueTraversal<AADereferenceable, DerefState>(
3568             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3569       return indicatePessimisticFixpoint();
3570 
3571     return clampStateAndIndicateChange(getState(), T);
3572   }
3573 
3574   /// See AbstractAttribute::trackStatistics()
3575   void trackStatistics() const override {
3576     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3577   }
3578 };
3579 
3580 /// Dereferenceable attribute for a return value.
3581 struct AADereferenceableReturned final
3582     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3583   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3584       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3585             IRP, A) {}
3586 
3587   /// See AbstractAttribute::trackStatistics()
3588   void trackStatistics() const override {
3589     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3590   }
3591 };
3592 
3593 /// Dereferenceable attribute for an argument
3594 struct AADereferenceableArgument final
3595     : AAArgumentFromCallSiteArguments<AADereferenceable,
3596                                       AADereferenceableImpl> {
3597   using Base =
3598       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3599   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3600       : Base(IRP, A) {}
3601 
3602   /// See AbstractAttribute::trackStatistics()
3603   void trackStatistics() const override {
3604     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3605   }
3606 };
3607 
3608 /// Dereferenceable attribute for a call site argument.
3609 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3610   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3611       : AADereferenceableFloating(IRP, A) {}
3612 
3613   /// See AbstractAttribute::trackStatistics()
3614   void trackStatistics() const override {
3615     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3616   }
3617 };
3618 
3619 /// Dereferenceable attribute deduction for a call site return value.
3620 struct AADereferenceableCallSiteReturned final
3621     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3622   using Base =
3623       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3624   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3625       : Base(IRP, A) {}
3626 
3627   /// See AbstractAttribute::trackStatistics()
3628   void trackStatistics() const override {
3629     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3630   }
3631 };
3632 
3633 // ------------------------ Align Argument Attribute ------------------------
3634 
3635 static unsigned getKnownAlignForUse(Attributor &A,
3636                                     AbstractAttribute &QueryingAA,
3637                                     Value &AssociatedValue, const Use *U,
3638                                     const Instruction *I, bool &TrackUse) {
3639   // We need to follow common pointer manipulation uses to the accesses they
3640   // feed into.
3641   if (isa<CastInst>(I)) {
3642     // Follow all but ptr2int casts.
3643     TrackUse = !isa<PtrToIntInst>(I);
3644     return 0;
3645   }
3646   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3647     if (GEP->hasAllConstantIndices()) {
3648       TrackUse = true;
3649       return 0;
3650     }
3651   }
3652 
3653   MaybeAlign MA;
3654   if (const auto *CB = dyn_cast<CallBase>(I)) {
3655     if (CB->isBundleOperand(U) || CB->isCallee(U))
3656       return 0;
3657 
3658     unsigned ArgNo = CB->getArgOperandNo(U);
3659     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3660     // As long as we only use known information there is no need to track
3661     // dependences here.
3662     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3663                                         /* TrackDependence */ false);
3664     MA = MaybeAlign(AlignAA.getKnownAlign());
3665   }
3666 
3667   const DataLayout &DL = A.getDataLayout();
3668   const Value *UseV = U->get();
3669   if (auto *SI = dyn_cast<StoreInst>(I)) {
3670     if (SI->getPointerOperand() == UseV)
3671       MA = SI->getAlign();
3672   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3673     if (LI->getPointerOperand() == UseV)
3674       MA = LI->getAlign();
3675   }
3676 
3677   if (!MA || *MA <= 1)
3678     return 0;
3679 
3680   unsigned Alignment = MA->value();
3681   int64_t Offset;
3682 
3683   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3684     if (Base == &AssociatedValue) {
3685       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3686       // So we can say that the maximum power of two which is a divisor of
3687       // gcd(Offset, Alignment) is an alignment.
3688 
3689       uint32_t gcd =
3690           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3691       Alignment = llvm::PowerOf2Floor(gcd);
3692     }
3693   }
3694 
3695   return Alignment;
3696 }
3697 
3698 struct AAAlignImpl : AAAlign {
3699   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3700 
3701   /// See AbstractAttribute::initialize(...).
3702   void initialize(Attributor &A) override {
3703     SmallVector<Attribute, 4> Attrs;
3704     getAttrs({Attribute::Alignment}, Attrs);
3705     for (const Attribute &Attr : Attrs)
3706       takeKnownMaximum(Attr.getValueAsInt());
3707 
3708     Value &V = getAssociatedValue();
3709     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3710     //       use of the function pointer. This was caused by D73131. We want to
3711     //       avoid this for function pointers especially because we iterate
3712     //       their uses and int2ptr is not handled. It is not a correctness
3713     //       problem though!
3714     if (!V.getType()->getPointerElementType()->isFunctionTy())
3715       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3716 
3717     if (getIRPosition().isFnInterfaceKind() &&
3718         (!getAnchorScope() ||
3719          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3720       indicatePessimisticFixpoint();
3721       return;
3722     }
3723 
3724     if (Instruction *CtxI = getCtxI())
3725       followUsesInMBEC(*this, A, getState(), *CtxI);
3726   }
3727 
3728   /// See AbstractAttribute::manifest(...).
3729   ChangeStatus manifest(Attributor &A) override {
3730     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3731 
3732     // Check for users that allow alignment annotations.
3733     Value &AssociatedValue = getAssociatedValue();
3734     for (const Use &U : AssociatedValue.uses()) {
3735       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3736         if (SI->getPointerOperand() == &AssociatedValue)
3737           if (SI->getAlignment() < getAssumedAlign()) {
3738             STATS_DECLTRACK(AAAlign, Store,
3739                             "Number of times alignment added to a store");
3740             SI->setAlignment(Align(getAssumedAlign()));
3741             LoadStoreChanged = ChangeStatus::CHANGED;
3742           }
3743       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3744         if (LI->getPointerOperand() == &AssociatedValue)
3745           if (LI->getAlignment() < getAssumedAlign()) {
3746             LI->setAlignment(Align(getAssumedAlign()));
3747             STATS_DECLTRACK(AAAlign, Load,
3748                             "Number of times alignment added to a load");
3749             LoadStoreChanged = ChangeStatus::CHANGED;
3750           }
3751       }
3752     }
3753 
3754     ChangeStatus Changed = AAAlign::manifest(A);
3755 
3756     Align InheritAlign =
3757         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3758     if (InheritAlign >= getAssumedAlign())
3759       return LoadStoreChanged;
3760     return Changed | LoadStoreChanged;
3761   }
3762 
3763   // TODO: Provide a helper to determine the implied ABI alignment and check in
3764   //       the existing manifest method and a new one for AAAlignImpl that value
3765   //       to avoid making the alignment explicit if it did not improve.
3766 
3767   /// See AbstractAttribute::getDeducedAttributes
3768   virtual void
3769   getDeducedAttributes(LLVMContext &Ctx,
3770                        SmallVectorImpl<Attribute> &Attrs) const override {
3771     if (getAssumedAlign() > 1)
3772       Attrs.emplace_back(
3773           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3774   }
3775 
3776   /// See followUsesInMBEC
3777   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3778                        AAAlign::StateType &State) {
3779     bool TrackUse = false;
3780 
3781     unsigned int KnownAlign =
3782         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3783     State.takeKnownMaximum(KnownAlign);
3784 
3785     return TrackUse;
3786   }
3787 
3788   /// See AbstractAttribute::getAsStr().
3789   const std::string getAsStr() const override {
3790     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3791                                 "-" + std::to_string(getAssumedAlign()) + ">")
3792                              : "unknown-align";
3793   }
3794 };
3795 
3796 /// Align attribute for a floating value.
3797 struct AAAlignFloating : AAAlignImpl {
3798   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3799 
3800   /// See AbstractAttribute::updateImpl(...).
3801   ChangeStatus updateImpl(Attributor &A) override {
3802     const DataLayout &DL = A.getDataLayout();
3803 
3804     auto VisitValueCB = [&](Value &V, const Instruction *,
3805                             AAAlign::StateType &T, bool Stripped) -> bool {
3806       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3807       if (!Stripped && this == &AA) {
3808         // Use only IR information if we did not strip anything.
3809         Align PA = V.getPointerAlignment(DL);
3810         T.takeKnownMaximum(PA.value());
3811         T.indicatePessimisticFixpoint();
3812       } else {
3813         // Use abstract attribute information.
3814         const AAAlign::StateType &DS =
3815             static_cast<const AAAlign::StateType &>(AA.getState());
3816         T ^= DS;
3817       }
3818       return T.isValidState();
3819     };
3820 
3821     StateType T;
3822     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3823                                                    VisitValueCB, getCtxI()))
3824       return indicatePessimisticFixpoint();
3825 
3826     // TODO: If we know we visited all incoming values, thus no are assumed
3827     // dead, we can take the known information from the state T.
3828     return clampStateAndIndicateChange(getState(), T);
3829   }
3830 
3831   /// See AbstractAttribute::trackStatistics()
3832   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3833 };
3834 
3835 /// Align attribute for function return value.
3836 struct AAAlignReturned final
3837     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3838   AAAlignReturned(const IRPosition &IRP, Attributor &A)
3839       : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP, A) {}
3840 
3841   /// See AbstractAttribute::trackStatistics()
3842   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3843 };
3844 
3845 /// Align attribute for function argument.
3846 struct AAAlignArgument final
3847     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3848   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3849   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3850 
3851   /// See AbstractAttribute::manifest(...).
3852   ChangeStatus manifest(Attributor &A) override {
3853     // If the associated argument is involved in a must-tail call we give up
3854     // because we would need to keep the argument alignments of caller and
3855     // callee in-sync. Just does not seem worth the trouble right now.
3856     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3857       return ChangeStatus::UNCHANGED;
3858     return Base::manifest(A);
3859   }
3860 
3861   /// See AbstractAttribute::trackStatistics()
3862   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3863 };
3864 
3865 struct AAAlignCallSiteArgument final : AAAlignFloating {
3866   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3867       : AAAlignFloating(IRP, A) {}
3868 
3869   /// See AbstractAttribute::manifest(...).
3870   ChangeStatus manifest(Attributor &A) override {
3871     // If the associated argument is involved in a must-tail call we give up
3872     // because we would need to keep the argument alignments of caller and
3873     // callee in-sync. Just does not seem worth the trouble right now.
3874     if (Argument *Arg = getAssociatedArgument())
3875       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3876         return ChangeStatus::UNCHANGED;
3877     ChangeStatus Changed = AAAlignImpl::manifest(A);
3878     Align InheritAlign =
3879         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3880     if (InheritAlign >= getAssumedAlign())
3881       Changed = ChangeStatus::UNCHANGED;
3882     return Changed;
3883   }
3884 
3885   /// See AbstractAttribute::updateImpl(Attributor &A).
3886   ChangeStatus updateImpl(Attributor &A) override {
3887     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3888     if (Argument *Arg = getAssociatedArgument()) {
3889       // We only take known information from the argument
3890       // so we do not need to track a dependence.
3891       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3892           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3893       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3894     }
3895     return Changed;
3896   }
3897 
3898   /// See AbstractAttribute::trackStatistics()
3899   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3900 };
3901 
3902 /// Align attribute deduction for a call site return value.
3903 struct AAAlignCallSiteReturned final
3904     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3905   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3906   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3907       : Base(IRP, A) {}
3908 
3909   /// See AbstractAttribute::initialize(...).
3910   void initialize(Attributor &A) override {
3911     Base::initialize(A);
3912     Function *F = getAssociatedFunction();
3913     if (!F)
3914       indicatePessimisticFixpoint();
3915   }
3916 
3917   /// See AbstractAttribute::trackStatistics()
3918   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3919 };
3920 
3921 /// ------------------ Function No-Return Attribute ----------------------------
3922 struct AANoReturnImpl : public AANoReturn {
3923   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3924 
3925   /// See AbstractAttribute::initialize(...).
3926   void initialize(Attributor &A) override {
3927     AANoReturn::initialize(A);
3928     Function *F = getAssociatedFunction();
3929     if (!F)
3930       indicatePessimisticFixpoint();
3931   }
3932 
3933   /// See AbstractAttribute::getAsStr().
3934   const std::string getAsStr() const override {
3935     return getAssumed() ? "noreturn" : "may-return";
3936   }
3937 
3938   /// See AbstractAttribute::updateImpl(Attributor &A).
3939   virtual ChangeStatus updateImpl(Attributor &A) override {
3940     auto CheckForNoReturn = [](Instruction &) { return false; };
3941     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
3942                                    {(unsigned)Instruction::Ret}))
3943       return indicatePessimisticFixpoint();
3944     return ChangeStatus::UNCHANGED;
3945   }
3946 };
3947 
3948 struct AANoReturnFunction final : AANoReturnImpl {
3949   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
3950       : AANoReturnImpl(IRP, A) {}
3951 
3952   /// See AbstractAttribute::trackStatistics()
3953   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
3954 };
3955 
3956 /// NoReturn attribute deduction for a call sites.
3957 struct AANoReturnCallSite final : AANoReturnImpl {
3958   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
3959       : AANoReturnImpl(IRP, A) {}
3960 
3961   /// See AbstractAttribute::updateImpl(...).
3962   ChangeStatus updateImpl(Attributor &A) override {
3963     // TODO: Once we have call site specific value information we can provide
3964     //       call site specific liveness information and then it makes
3965     //       sense to specialize attributes for call sites arguments instead of
3966     //       redirecting requests to the callee argument.
3967     Function *F = getAssociatedFunction();
3968     const IRPosition &FnPos = IRPosition::function(*F);
3969     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
3970     return clampStateAndIndicateChange(
3971         getState(),
3972         static_cast<const AANoReturn::StateType &>(FnAA.getState()));
3973   }
3974 
3975   /// See AbstractAttribute::trackStatistics()
3976   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
3977 };
3978 
3979 /// ----------------------- Variable Capturing ---------------------------------
3980 
3981 /// A class to hold the state of for no-capture attributes.
3982 struct AANoCaptureImpl : public AANoCapture {
3983   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
3984 
3985   /// See AbstractAttribute::initialize(...).
3986   void initialize(Attributor &A) override {
3987     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
3988       indicateOptimisticFixpoint();
3989       return;
3990     }
3991     Function *AnchorScope = getAnchorScope();
3992     if (isFnInterfaceKind() &&
3993         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
3994       indicatePessimisticFixpoint();
3995       return;
3996     }
3997 
3998     // You cannot "capture" null in the default address space.
3999     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4000         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4001       indicateOptimisticFixpoint();
4002       return;
4003     }
4004 
4005     const Function *F = getArgNo() >= 0 ? getAssociatedFunction() : AnchorScope;
4006 
4007     // Check what state the associated function can actually capture.
4008     if (F)
4009       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4010     else
4011       indicatePessimisticFixpoint();
4012   }
4013 
4014   /// See AbstractAttribute::updateImpl(...).
4015   ChangeStatus updateImpl(Attributor &A) override;
4016 
4017   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4018   virtual void
4019   getDeducedAttributes(LLVMContext &Ctx,
4020                        SmallVectorImpl<Attribute> &Attrs) const override {
4021     if (!isAssumedNoCaptureMaybeReturned())
4022       return;
4023 
4024     if (getArgNo() >= 0) {
4025       if (isAssumedNoCapture())
4026         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4027       else if (ManifestInternal)
4028         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4029     }
4030   }
4031 
4032   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4033   /// depending on the ability of the function associated with \p IRP to capture
4034   /// state in memory and through "returning/throwing", respectively.
4035   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4036                                                    const Function &F,
4037                                                    BitIntegerState &State) {
4038     // TODO: Once we have memory behavior attributes we should use them here.
4039 
4040     // If we know we cannot communicate or write to memory, we do not care about
4041     // ptr2int anymore.
4042     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4043         F.getReturnType()->isVoidTy()) {
4044       State.addKnownBits(NO_CAPTURE);
4045       return;
4046     }
4047 
4048     // A function cannot capture state in memory if it only reads memory, it can
4049     // however return/throw state and the state might be influenced by the
4050     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4051     if (F.onlyReadsMemory())
4052       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4053 
4054     // A function cannot communicate state back if it does not through
4055     // exceptions and doesn not return values.
4056     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4057       State.addKnownBits(NOT_CAPTURED_IN_RET);
4058 
4059     // Check existing "returned" attributes.
4060     int ArgNo = IRP.getArgNo();
4061     if (F.doesNotThrow() && ArgNo >= 0) {
4062       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4063         if (F.hasParamAttribute(u, Attribute::Returned)) {
4064           if (u == unsigned(ArgNo))
4065             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4066           else if (F.onlyReadsMemory())
4067             State.addKnownBits(NO_CAPTURE);
4068           else
4069             State.addKnownBits(NOT_CAPTURED_IN_RET);
4070           break;
4071         }
4072     }
4073   }
4074 
4075   /// See AbstractState::getAsStr().
4076   const std::string getAsStr() const override {
4077     if (isKnownNoCapture())
4078       return "known not-captured";
4079     if (isAssumedNoCapture())
4080       return "assumed not-captured";
4081     if (isKnownNoCaptureMaybeReturned())
4082       return "known not-captured-maybe-returned";
4083     if (isAssumedNoCaptureMaybeReturned())
4084       return "assumed not-captured-maybe-returned";
4085     return "assumed-captured";
4086   }
4087 };
4088 
4089 /// Attributor-aware capture tracker.
4090 struct AACaptureUseTracker final : public CaptureTracker {
4091 
4092   /// Create a capture tracker that can lookup in-flight abstract attributes
4093   /// through the Attributor \p A.
4094   ///
4095   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4096   /// search is stopped. If a use leads to a return instruction,
4097   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4098   /// If a use leads to a ptr2int which may capture the value,
4099   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4100   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4101   /// set. All values in \p PotentialCopies are later tracked as well. For every
4102   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4103   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4104   /// conservatively set to true.
4105   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4106                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4107                       SmallVectorImpl<const Value *> &PotentialCopies,
4108                       unsigned &RemainingUsesToExplore)
4109       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4110         PotentialCopies(PotentialCopies),
4111         RemainingUsesToExplore(RemainingUsesToExplore) {}
4112 
4113   /// Determine if \p V maybe captured. *Also updates the state!*
4114   bool valueMayBeCaptured(const Value *V) {
4115     if (V->getType()->isPointerTy()) {
4116       PointerMayBeCaptured(V, this);
4117     } else {
4118       State.indicatePessimisticFixpoint();
4119     }
4120     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4121   }
4122 
4123   /// See CaptureTracker::tooManyUses().
4124   void tooManyUses() override {
4125     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4126   }
4127 
4128   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4129     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4130       return true;
4131     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4132         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
4133         DepClassTy::OPTIONAL);
4134     return DerefAA.getAssumedDereferenceableBytes();
4135   }
4136 
4137   /// See CaptureTracker::captured(...).
4138   bool captured(const Use *U) override {
4139     Instruction *UInst = cast<Instruction>(U->getUser());
4140     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4141                       << "\n");
4142 
4143     // Because we may reuse the tracker multiple times we keep track of the
4144     // number of explored uses ourselves as well.
4145     if (RemainingUsesToExplore-- == 0) {
4146       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4147       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4148                           /* Return */ true);
4149     }
4150 
4151     // Deal with ptr2int by following uses.
4152     if (isa<PtrToIntInst>(UInst)) {
4153       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4154       return valueMayBeCaptured(UInst);
4155     }
4156 
4157     // Explicitly catch return instructions.
4158     if (isa<ReturnInst>(UInst))
4159       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4160                           /* Return */ true);
4161 
4162     // For now we only use special logic for call sites. However, the tracker
4163     // itself knows about a lot of other non-capturing cases already.
4164     auto *CB = dyn_cast<CallBase>(UInst);
4165     if (!CB || !CB->isArgOperand(U))
4166       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4167                           /* Return */ true);
4168 
4169     unsigned ArgNo = CB->getArgOperandNo(U);
4170     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4171     // If we have a abstract no-capture attribute for the argument we can use
4172     // it to justify a non-capture attribute here. This allows recursion!
4173     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4174     if (ArgNoCaptureAA.isAssumedNoCapture())
4175       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4176                           /* Return */ false);
4177     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4178       addPotentialCopy(*CB);
4179       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4180                           /* Return */ false);
4181     }
4182 
4183     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4184     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4185                         /* Return */ true);
4186   }
4187 
4188   /// Register \p CS as potential copy of the value we are checking.
4189   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4190 
4191   /// See CaptureTracker::shouldExplore(...).
4192   bool shouldExplore(const Use *U) override {
4193     // Check liveness and ignore droppable users.
4194     return !U->getUser()->isDroppable() &&
4195            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4196   }
4197 
4198   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4199   /// \p CapturedInRet, then return the appropriate value for use in the
4200   /// CaptureTracker::captured() interface.
4201   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4202                     bool CapturedInRet) {
4203     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4204                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4205     if (CapturedInMem)
4206       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4207     if (CapturedInInt)
4208       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4209     if (CapturedInRet)
4210       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4211     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4212   }
4213 
4214 private:
4215   /// The attributor providing in-flight abstract attributes.
4216   Attributor &A;
4217 
4218   /// The abstract attribute currently updated.
4219   AANoCapture &NoCaptureAA;
4220 
4221   /// The abstract liveness state.
4222   const AAIsDead &IsDeadAA;
4223 
4224   /// The state currently updated.
4225   AANoCapture::StateType &State;
4226 
4227   /// Set of potential copies of the tracked value.
4228   SmallVectorImpl<const Value *> &PotentialCopies;
4229 
4230   /// Global counter to limit the number of explored uses.
4231   unsigned &RemainingUsesToExplore;
4232 };
4233 
4234 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4235   const IRPosition &IRP = getIRPosition();
4236   const Value *V =
4237       getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
4238   if (!V)
4239     return indicatePessimisticFixpoint();
4240 
4241   const Function *F =
4242       getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4243   assert(F && "Expected a function!");
4244   const IRPosition &FnPos = IRPosition::function(*F);
4245   const auto &IsDeadAA =
4246       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4247 
4248   AANoCapture::StateType T;
4249 
4250   // Readonly means we cannot capture through memory.
4251   const auto &FnMemAA =
4252       A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false);
4253   if (FnMemAA.isAssumedReadOnly()) {
4254     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4255     if (FnMemAA.isKnownReadOnly())
4256       addKnownBits(NOT_CAPTURED_IN_MEM);
4257     else
4258       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4259   }
4260 
4261   // Make sure all returned values are different than the underlying value.
4262   // TODO: we could do this in a more sophisticated way inside
4263   //       AAReturnedValues, e.g., track all values that escape through returns
4264   //       directly somehow.
4265   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4266     bool SeenConstant = false;
4267     for (auto &It : RVAA.returned_values()) {
4268       if (isa<Constant>(It.first)) {
4269         if (SeenConstant)
4270           return false;
4271         SeenConstant = true;
4272       } else if (!isa<Argument>(It.first) ||
4273                  It.first == getAssociatedArgument())
4274         return false;
4275     }
4276     return true;
4277   };
4278 
4279   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4280       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4281   if (NoUnwindAA.isAssumedNoUnwind()) {
4282     bool IsVoidTy = F->getReturnType()->isVoidTy();
4283     const AAReturnedValues *RVAA =
4284         IsVoidTy ? nullptr
4285                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4286                                                  /* TrackDependence */ true,
4287                                                  DepClassTy::OPTIONAL);
4288     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4289       T.addKnownBits(NOT_CAPTURED_IN_RET);
4290       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4291         return ChangeStatus::UNCHANGED;
4292       if (NoUnwindAA.isKnownNoUnwind() &&
4293           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4294         addKnownBits(NOT_CAPTURED_IN_RET);
4295         if (isKnown(NOT_CAPTURED_IN_MEM))
4296           return indicateOptimisticFixpoint();
4297       }
4298     }
4299   }
4300 
4301   // Use the CaptureTracker interface and logic with the specialized tracker,
4302   // defined in AACaptureUseTracker, that can look at in-flight abstract
4303   // attributes and directly updates the assumed state.
4304   SmallVector<const Value *, 4> PotentialCopies;
4305   unsigned RemainingUsesToExplore =
4306       getDefaultMaxUsesToExploreForCaptureTracking();
4307   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4308                               RemainingUsesToExplore);
4309 
4310   // Check all potential copies of the associated value until we can assume
4311   // none will be captured or we have to assume at least one might be.
4312   unsigned Idx = 0;
4313   PotentialCopies.push_back(V);
4314   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4315     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4316 
4317   AANoCapture::StateType &S = getState();
4318   auto Assumed = S.getAssumed();
4319   S.intersectAssumedBits(T.getAssumed());
4320   if (!isAssumedNoCaptureMaybeReturned())
4321     return indicatePessimisticFixpoint();
4322   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4323                                    : ChangeStatus::CHANGED;
4324 }
4325 
4326 /// NoCapture attribute for function arguments.
4327 struct AANoCaptureArgument final : AANoCaptureImpl {
4328   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4329       : AANoCaptureImpl(IRP, A) {}
4330 
4331   /// See AbstractAttribute::trackStatistics()
4332   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4333 };
4334 
4335 /// NoCapture attribute for call site arguments.
4336 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4337   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4338       : AANoCaptureImpl(IRP, A) {}
4339 
4340   /// See AbstractAttribute::initialize(...).
4341   void initialize(Attributor &A) override {
4342     if (Argument *Arg = getAssociatedArgument())
4343       if (Arg->hasByValAttr())
4344         indicateOptimisticFixpoint();
4345     AANoCaptureImpl::initialize(A);
4346   }
4347 
4348   /// See AbstractAttribute::updateImpl(...).
4349   ChangeStatus updateImpl(Attributor &A) override {
4350     // TODO: Once we have call site specific value information we can provide
4351     //       call site specific liveness information and then it makes
4352     //       sense to specialize attributes for call sites arguments instead of
4353     //       redirecting requests to the callee argument.
4354     Argument *Arg = getAssociatedArgument();
4355     if (!Arg)
4356       return indicatePessimisticFixpoint();
4357     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4358     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4359     return clampStateAndIndicateChange(
4360         getState(),
4361         static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
4362   }
4363 
4364   /// See AbstractAttribute::trackStatistics()
4365   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4366 };
4367 
4368 /// NoCapture attribute for floating values.
4369 struct AANoCaptureFloating final : AANoCaptureImpl {
4370   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4371       : AANoCaptureImpl(IRP, A) {}
4372 
4373   /// See AbstractAttribute::trackStatistics()
4374   void trackStatistics() const override {
4375     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4376   }
4377 };
4378 
4379 /// NoCapture attribute for function return value.
4380 struct AANoCaptureReturned final : AANoCaptureImpl {
4381   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4382       : AANoCaptureImpl(IRP, A) {
4383     llvm_unreachable("NoCapture is not applicable to function returns!");
4384   }
4385 
4386   /// See AbstractAttribute::initialize(...).
4387   void initialize(Attributor &A) override {
4388     llvm_unreachable("NoCapture is not applicable to function returns!");
4389   }
4390 
4391   /// See AbstractAttribute::updateImpl(...).
4392   ChangeStatus updateImpl(Attributor &A) override {
4393     llvm_unreachable("NoCapture is not applicable to function returns!");
4394   }
4395 
4396   /// See AbstractAttribute::trackStatistics()
4397   void trackStatistics() const override {}
4398 };
4399 
4400 /// NoCapture attribute deduction for a call site return value.
4401 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4402   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4403       : AANoCaptureImpl(IRP, A) {}
4404 
4405   /// See AbstractAttribute::trackStatistics()
4406   void trackStatistics() const override {
4407     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4408   }
4409 };
4410 
4411 /// ------------------ Value Simplify Attribute ----------------------------
4412 struct AAValueSimplifyImpl : AAValueSimplify {
4413   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4414       : AAValueSimplify(IRP, A) {}
4415 
4416   /// See AbstractAttribute::initialize(...).
4417   void initialize(Attributor &A) override {
4418     if (getAssociatedValue().getType()->isVoidTy())
4419       indicatePessimisticFixpoint();
4420   }
4421 
4422   /// See AbstractAttribute::getAsStr().
4423   const std::string getAsStr() const override {
4424     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4425                         : "not-simple";
4426   }
4427 
4428   /// See AbstractAttribute::trackStatistics()
4429   void trackStatistics() const override {}
4430 
4431   /// See AAValueSimplify::getAssumedSimplifiedValue()
4432   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4433     if (!getAssumed())
4434       return const_cast<Value *>(&getAssociatedValue());
4435     return SimplifiedAssociatedValue;
4436   }
4437 
4438   /// Helper function for querying AAValueSimplify and updating candicate.
4439   /// \param QueryingValue Value trying to unify with SimplifiedValue
4440   /// \param AccumulatedSimplifiedValue Current simplification result.
4441   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4442                              Value &QueryingValue,
4443                              Optional<Value *> &AccumulatedSimplifiedValue) {
4444     // FIXME: Add a typecast support.
4445 
4446     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4447         QueryingAA, IRPosition::value(QueryingValue));
4448 
4449     Optional<Value *> QueryingValueSimplified =
4450         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4451 
4452     if (!QueryingValueSimplified.hasValue())
4453       return true;
4454 
4455     if (!QueryingValueSimplified.getValue())
4456       return false;
4457 
4458     Value &QueryingValueSimplifiedUnwrapped =
4459         *QueryingValueSimplified.getValue();
4460 
4461     if (AccumulatedSimplifiedValue.hasValue() &&
4462         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4463         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4464       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4465     if (AccumulatedSimplifiedValue.hasValue() &&
4466         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4467       return true;
4468 
4469     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4470                       << " is assumed to be "
4471                       << QueryingValueSimplifiedUnwrapped << "\n");
4472 
4473     AccumulatedSimplifiedValue = QueryingValueSimplified;
4474     return true;
4475   }
4476 
4477   bool askSimplifiedValueForAAValueConstantRange(Attributor &A) {
4478     if (!getAssociatedValue().getType()->isIntegerTy())
4479       return false;
4480 
4481     const auto &ValueConstantRangeAA =
4482         A.getAAFor<AAValueConstantRange>(*this, getIRPosition());
4483 
4484     Optional<ConstantInt *> COpt =
4485         ValueConstantRangeAA.getAssumedConstantInt(A);
4486     if (COpt.hasValue()) {
4487       if (auto *C = COpt.getValue())
4488         SimplifiedAssociatedValue = C;
4489       else
4490         return false;
4491     } else {
4492       SimplifiedAssociatedValue = llvm::None;
4493     }
4494     return true;
4495   }
4496 
4497   /// See AbstractAttribute::manifest(...).
4498   ChangeStatus manifest(Attributor &A) override {
4499     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4500 
4501     if (SimplifiedAssociatedValue.hasValue() &&
4502         !SimplifiedAssociatedValue.getValue())
4503       return Changed;
4504 
4505     Value &V = getAssociatedValue();
4506     auto *C = SimplifiedAssociatedValue.hasValue()
4507                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4508                   : UndefValue::get(V.getType());
4509     if (C) {
4510       // We can replace the AssociatedValue with the constant.
4511       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4512         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4513                           << " :: " << *this << "\n");
4514         if (A.changeValueAfterManifest(V, *C))
4515           Changed = ChangeStatus::CHANGED;
4516       }
4517     }
4518 
4519     return Changed | AAValueSimplify::manifest(A);
4520   }
4521 
4522   /// See AbstractState::indicatePessimisticFixpoint(...).
4523   ChangeStatus indicatePessimisticFixpoint() override {
4524     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4525     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4526     SimplifiedAssociatedValue = &getAssociatedValue();
4527     indicateOptimisticFixpoint();
4528     return ChangeStatus::CHANGED;
4529   }
4530 
4531 protected:
4532   // An assumed simplified value. Initially, it is set to Optional::None, which
4533   // means that the value is not clear under current assumption. If in the
4534   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4535   // returns orignal associated value.
4536   Optional<Value *> SimplifiedAssociatedValue;
4537 };
4538 
4539 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4540   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4541       : AAValueSimplifyImpl(IRP, A) {}
4542 
4543   void initialize(Attributor &A) override {
4544     AAValueSimplifyImpl::initialize(A);
4545     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4546       indicatePessimisticFixpoint();
4547     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4548                  Attribute::StructRet, Attribute::Nest},
4549                 /* IgnoreSubsumingPositions */ true))
4550       indicatePessimisticFixpoint();
4551 
4552     // FIXME: This is a hack to prevent us from propagating function poiner in
4553     // the new pass manager CGSCC pass as it creates call edges the
4554     // CallGraphUpdater cannot handle yet.
4555     Value &V = getAssociatedValue();
4556     if (V.getType()->isPointerTy() &&
4557         V.getType()->getPointerElementType()->isFunctionTy() &&
4558         !A.isModulePass())
4559       indicatePessimisticFixpoint();
4560   }
4561 
4562   /// See AbstractAttribute::updateImpl(...).
4563   ChangeStatus updateImpl(Attributor &A) override {
4564     // Byval is only replacable if it is readonly otherwise we would write into
4565     // the replaced value and not the copy that byval creates implicitly.
4566     Argument *Arg = getAssociatedArgument();
4567     if (Arg->hasByValAttr()) {
4568       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4569       //       there is no race by not copying a constant byval.
4570       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4571       if (!MemAA.isAssumedReadOnly())
4572         return indicatePessimisticFixpoint();
4573     }
4574 
4575     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4576 
4577     auto PredForCallSite = [&](AbstractCallSite ACS) {
4578       const IRPosition &ACSArgPos =
4579           IRPosition::callsite_argument(ACS, getArgNo());
4580       // Check if a coresponding argument was found or if it is on not
4581       // associated (which can happen for callback calls).
4582       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4583         return false;
4584 
4585       // We can only propagate thread independent values through callbacks.
4586       // This is different to direct/indirect call sites because for them we
4587       // know the thread executing the caller and callee is the same. For
4588       // callbacks this is not guaranteed, thus a thread dependent value could
4589       // be different for the caller and callee, making it invalid to propagate.
4590       Value &ArgOp = ACSArgPos.getAssociatedValue();
4591       if (ACS.isCallbackCall())
4592         if (auto *C = dyn_cast<Constant>(&ArgOp))
4593           if (C->isThreadDependent())
4594             return false;
4595       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4596     };
4597 
4598     bool AllCallSitesKnown;
4599     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4600                                 AllCallSitesKnown))
4601       if (!askSimplifiedValueForAAValueConstantRange(A))
4602         return indicatePessimisticFixpoint();
4603 
4604     // If a candicate was found in this update, return CHANGED.
4605     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4606                ? ChangeStatus::UNCHANGED
4607                : ChangeStatus ::CHANGED;
4608   }
4609 
4610   /// See AbstractAttribute::trackStatistics()
4611   void trackStatistics() const override {
4612     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4613   }
4614 };
4615 
4616 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4617   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4618       : AAValueSimplifyImpl(IRP, A) {}
4619 
4620   /// See AbstractAttribute::updateImpl(...).
4621   ChangeStatus updateImpl(Attributor &A) override {
4622     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4623 
4624     auto PredForReturned = [&](Value &V) {
4625       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4626     };
4627 
4628     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4629       if (!askSimplifiedValueForAAValueConstantRange(A))
4630         return indicatePessimisticFixpoint();
4631 
4632     // If a candicate was found in this update, return CHANGED.
4633     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4634                ? ChangeStatus::UNCHANGED
4635                : ChangeStatus ::CHANGED;
4636   }
4637 
4638   ChangeStatus manifest(Attributor &A) override {
4639     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4640 
4641     if (SimplifiedAssociatedValue.hasValue() &&
4642         !SimplifiedAssociatedValue.getValue())
4643       return Changed;
4644 
4645     Value &V = getAssociatedValue();
4646     auto *C = SimplifiedAssociatedValue.hasValue()
4647                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4648                   : UndefValue::get(V.getType());
4649     if (C) {
4650       auto PredForReturned =
4651           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4652             // We can replace the AssociatedValue with the constant.
4653             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4654               return true;
4655 
4656             for (ReturnInst *RI : RetInsts) {
4657               if (RI->getFunction() != getAnchorScope())
4658                 continue;
4659               auto *RC = C;
4660               if (RC->getType() != RI->getReturnValue()->getType())
4661                 RC = ConstantExpr::getBitCast(RC,
4662                                               RI->getReturnValue()->getType());
4663               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4664                                 << " in " << *RI << " :: " << *this << "\n");
4665               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4666                 Changed = ChangeStatus::CHANGED;
4667             }
4668             return true;
4669           };
4670       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4671     }
4672 
4673     return Changed | AAValueSimplify::manifest(A);
4674   }
4675 
4676   /// See AbstractAttribute::trackStatistics()
4677   void trackStatistics() const override {
4678     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4679   }
4680 };
4681 
4682 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4683   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4684       : AAValueSimplifyImpl(IRP, A) {}
4685 
4686   /// See AbstractAttribute::initialize(...).
4687   void initialize(Attributor &A) override {
4688     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4689     //        Needs investigation.
4690     // AAValueSimplifyImpl::initialize(A);
4691     Value &V = getAnchorValue();
4692 
4693     // TODO: add other stuffs
4694     if (isa<Constant>(V))
4695       indicatePessimisticFixpoint();
4696   }
4697 
4698   /// See AbstractAttribute::updateImpl(...).
4699   ChangeStatus updateImpl(Attributor &A) override {
4700     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4701 
4702     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4703                             bool Stripped) -> bool {
4704       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4705       if (!Stripped && this == &AA) {
4706         // TODO: Look the instruction and check recursively.
4707 
4708         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4709                           << "\n");
4710         return false;
4711       }
4712       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4713     };
4714 
4715     bool Dummy = false;
4716     if (!genericValueTraversal<AAValueSimplify, bool>(
4717             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4718             /* UseValueSimplify */ false))
4719       if (!askSimplifiedValueForAAValueConstantRange(A))
4720         return indicatePessimisticFixpoint();
4721 
4722     // If a candicate was found in this update, return CHANGED.
4723 
4724     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4725                ? ChangeStatus::UNCHANGED
4726                : ChangeStatus ::CHANGED;
4727   }
4728 
4729   /// See AbstractAttribute::trackStatistics()
4730   void trackStatistics() const override {
4731     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4732   }
4733 };
4734 
4735 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4736   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4737       : AAValueSimplifyImpl(IRP, A) {}
4738 
4739   /// See AbstractAttribute::initialize(...).
4740   void initialize(Attributor &A) override {
4741     SimplifiedAssociatedValue = &getAnchorValue();
4742     indicateOptimisticFixpoint();
4743   }
4744   /// See AbstractAttribute::initialize(...).
4745   ChangeStatus updateImpl(Attributor &A) override {
4746     llvm_unreachable(
4747         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4748   }
4749   /// See AbstractAttribute::trackStatistics()
4750   void trackStatistics() const override {
4751     STATS_DECLTRACK_FN_ATTR(value_simplify)
4752   }
4753 };
4754 
4755 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4756   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4757       : AAValueSimplifyFunction(IRP, A) {}
4758   /// See AbstractAttribute::trackStatistics()
4759   void trackStatistics() const override {
4760     STATS_DECLTRACK_CS_ATTR(value_simplify)
4761   }
4762 };
4763 
4764 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4765   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4766       : AAValueSimplifyReturned(IRP, A) {}
4767 
4768   /// See AbstractAttribute::manifest(...).
4769   ChangeStatus manifest(Attributor &A) override {
4770     return AAValueSimplifyImpl::manifest(A);
4771   }
4772 
4773   void trackStatistics() const override {
4774     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4775   }
4776 };
4777 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4778   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4779       : AAValueSimplifyFloating(IRP, A) {}
4780 
4781   /// See AbstractAttribute::manifest(...).
4782   ChangeStatus manifest(Attributor &A) override {
4783     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4784 
4785     if (SimplifiedAssociatedValue.hasValue() &&
4786         !SimplifiedAssociatedValue.getValue())
4787       return Changed;
4788 
4789     Value &V = getAssociatedValue();
4790     auto *C = SimplifiedAssociatedValue.hasValue()
4791                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4792                   : UndefValue::get(V.getType());
4793     if (C) {
4794       Use &U = cast<CallBase>(&getAnchorValue())->getArgOperandUse(getArgNo());
4795       // We can replace the AssociatedValue with the constant.
4796       if (&V != C && V.getType() == C->getType()) {
4797         if (A.changeUseAfterManifest(U, *C))
4798           Changed = ChangeStatus::CHANGED;
4799       }
4800     }
4801 
4802     return Changed | AAValueSimplify::manifest(A);
4803   }
4804 
4805   void trackStatistics() const override {
4806     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
4807   }
4808 };
4809 
4810 /// ----------------------- Heap-To-Stack Conversion ---------------------------
4811 struct AAHeapToStackImpl : public AAHeapToStack {
4812   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
4813       : AAHeapToStack(IRP, A) {}
4814 
4815   const std::string getAsStr() const override {
4816     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
4817   }
4818 
4819   ChangeStatus manifest(Attributor &A) override {
4820     assert(getState().isValidState() &&
4821            "Attempted to manifest an invalid state!");
4822 
4823     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
4824     Function *F = getAnchorScope();
4825     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4826 
4827     for (Instruction *MallocCall : MallocCalls) {
4828       // This malloc cannot be replaced.
4829       if (BadMallocCalls.count(MallocCall))
4830         continue;
4831 
4832       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
4833         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
4834         A.deleteAfterManifest(*FreeCall);
4835         HasChanged = ChangeStatus::CHANGED;
4836       }
4837 
4838       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
4839                         << "\n");
4840 
4841       Align Alignment;
4842       Constant *Size;
4843       if (isCallocLikeFn(MallocCall, TLI)) {
4844         auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
4845         auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
4846         APInt TotalSize = SizeT->getValue() * Num->getValue();
4847         Size =
4848             ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
4849       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
4850         Size = cast<ConstantInt>(MallocCall->getOperand(1));
4851         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
4852                                    ->getValue()
4853                                    .getZExtValue())
4854                         .valueOrOne();
4855       } else {
4856         Size = cast<ConstantInt>(MallocCall->getOperand(0));
4857       }
4858 
4859       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
4860       Instruction *AI =
4861           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
4862                          "", MallocCall->getNextNode());
4863 
4864       if (AI->getType() != MallocCall->getType())
4865         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
4866                              AI->getNextNode());
4867 
4868       A.changeValueAfterManifest(*MallocCall, *AI);
4869 
4870       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
4871         auto *NBB = II->getNormalDest();
4872         BranchInst::Create(NBB, MallocCall->getParent());
4873         A.deleteAfterManifest(*MallocCall);
4874       } else {
4875         A.deleteAfterManifest(*MallocCall);
4876       }
4877 
4878       // Zero out the allocated memory if it was a calloc.
4879       if (isCallocLikeFn(MallocCall, TLI)) {
4880         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
4881                                    AI->getNextNode());
4882         Value *Ops[] = {
4883             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
4884             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
4885 
4886         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
4887         Module *M = F->getParent();
4888         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
4889         CallInst::Create(Fn, Ops, "", BI->getNextNode());
4890       }
4891       HasChanged = ChangeStatus::CHANGED;
4892     }
4893 
4894     return HasChanged;
4895   }
4896 
4897   /// Collection of all malloc calls in a function.
4898   SmallSetVector<Instruction *, 4> MallocCalls;
4899 
4900   /// Collection of malloc calls that cannot be converted.
4901   DenseSet<const Instruction *> BadMallocCalls;
4902 
4903   /// A map for each malloc call to the set of associated free calls.
4904   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
4905 
4906   ChangeStatus updateImpl(Attributor &A) override;
4907 };
4908 
4909 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
4910   const Function *F = getAnchorScope();
4911   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
4912 
4913   MustBeExecutedContextExplorer &Explorer =
4914       A.getInfoCache().getMustBeExecutedContextExplorer();
4915 
4916   auto FreeCheck = [&](Instruction &I) {
4917     const auto &Frees = FreesForMalloc.lookup(&I);
4918     if (Frees.size() != 1)
4919       return false;
4920     Instruction *UniqueFree = *Frees.begin();
4921     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
4922   };
4923 
4924   auto UsesCheck = [&](Instruction &I) {
4925     bool ValidUsesOnly = true;
4926     bool MustUse = true;
4927     auto Pred = [&](const Use &U, bool &Follow) -> bool {
4928       Instruction *UserI = cast<Instruction>(U.getUser());
4929       if (isa<LoadInst>(UserI))
4930         return true;
4931       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
4932         if (SI->getValueOperand() == U.get()) {
4933           LLVM_DEBUG(dbgs()
4934                      << "[H2S] escaping store to memory: " << *UserI << "\n");
4935           ValidUsesOnly = false;
4936         } else {
4937           // A store into the malloc'ed memory is fine.
4938         }
4939         return true;
4940       }
4941       if (auto *CB = dyn_cast<CallBase>(UserI)) {
4942         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
4943           return true;
4944         // Record malloc.
4945         if (isFreeCall(UserI, TLI)) {
4946           if (MustUse) {
4947             FreesForMalloc[&I].insert(UserI);
4948           } else {
4949             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
4950                               << *UserI << "\n");
4951             ValidUsesOnly = false;
4952           }
4953           return true;
4954         }
4955 
4956         unsigned ArgNo = CB->getArgOperandNo(&U);
4957 
4958         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
4959             *this, IRPosition::callsite_argument(*CB, ArgNo));
4960 
4961         // If a callsite argument use is nofree, we are fine.
4962         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
4963             *this, IRPosition::callsite_argument(*CB, ArgNo));
4964 
4965         if (!NoCaptureAA.isAssumedNoCapture() ||
4966             !ArgNoFreeAA.isAssumedNoFree()) {
4967           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
4968           ValidUsesOnly = false;
4969         }
4970         return true;
4971       }
4972 
4973       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
4974           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
4975         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
4976         Follow = true;
4977         return true;
4978       }
4979       // Unknown user for which we can not track uses further (in a way that
4980       // makes sense).
4981       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
4982       ValidUsesOnly = false;
4983       return true;
4984     };
4985     A.checkForAllUses(Pred, *this, I);
4986     return ValidUsesOnly;
4987   };
4988 
4989   auto MallocCallocCheck = [&](Instruction &I) {
4990     if (BadMallocCalls.count(&I))
4991       return true;
4992 
4993     bool IsMalloc = isMallocLikeFn(&I, TLI);
4994     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
4995     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
4996     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
4997       BadMallocCalls.insert(&I);
4998       return true;
4999     }
5000 
5001     if (IsMalloc) {
5002       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5003         if (Size->getValue().ule(MaxHeapToStackSize))
5004           if (UsesCheck(I) || FreeCheck(I)) {
5005             MallocCalls.insert(&I);
5006             return true;
5007           }
5008     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5009       // Only if the alignment and sizes are constant.
5010       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5011         if (Size->getValue().ule(MaxHeapToStackSize))
5012           if (UsesCheck(I) || FreeCheck(I)) {
5013             MallocCalls.insert(&I);
5014             return true;
5015           }
5016     } else if (IsCalloc) {
5017       bool Overflow = false;
5018       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5019         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5020           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5021                   .ule(MaxHeapToStackSize))
5022             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5023               MallocCalls.insert(&I);
5024               return true;
5025             }
5026     }
5027 
5028     BadMallocCalls.insert(&I);
5029     return true;
5030   };
5031 
5032   size_t NumBadMallocs = BadMallocCalls.size();
5033 
5034   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5035 
5036   if (NumBadMallocs != BadMallocCalls.size())
5037     return ChangeStatus::CHANGED;
5038 
5039   return ChangeStatus::UNCHANGED;
5040 }
5041 
5042 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5043   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5044       : AAHeapToStackImpl(IRP, A) {}
5045 
5046   /// See AbstractAttribute::trackStatistics().
5047   void trackStatistics() const override {
5048     STATS_DECL(
5049         MallocCalls, Function,
5050         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5051     for (auto *C : MallocCalls)
5052       if (!BadMallocCalls.count(C))
5053         ++BUILD_STAT_NAME(MallocCalls, Function);
5054   }
5055 };
5056 
5057 /// ----------------------- Privatizable Pointers ------------------------------
5058 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5059   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5060       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5061 
5062   ChangeStatus indicatePessimisticFixpoint() override {
5063     AAPrivatizablePtr::indicatePessimisticFixpoint();
5064     PrivatizableType = nullptr;
5065     return ChangeStatus::CHANGED;
5066   }
5067 
5068   /// Identify the type we can chose for a private copy of the underlying
5069   /// argument. None means it is not clear yet, nullptr means there is none.
5070   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5071 
5072   /// Return a privatizable type that encloses both T0 and T1.
5073   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5074   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5075     if (!T0.hasValue())
5076       return T1;
5077     if (!T1.hasValue())
5078       return T0;
5079     if (T0 == T1)
5080       return T0;
5081     return nullptr;
5082   }
5083 
5084   Optional<Type *> getPrivatizableType() const override {
5085     return PrivatizableType;
5086   }
5087 
5088   const std::string getAsStr() const override {
5089     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5090   }
5091 
5092 protected:
5093   Optional<Type *> PrivatizableType;
5094 };
5095 
5096 // TODO: Do this for call site arguments (probably also other values) as well.
5097 
5098 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5099   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5100       : AAPrivatizablePtrImpl(IRP, A) {}
5101 
5102   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5103   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5104     // If this is a byval argument and we know all the call sites (so we can
5105     // rewrite them), there is no need to check them explicitly.
5106     bool AllCallSitesKnown;
5107     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5108         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5109                                true, AllCallSitesKnown))
5110       return getAssociatedValue().getType()->getPointerElementType();
5111 
5112     Optional<Type *> Ty;
5113     unsigned ArgNo = getIRPosition().getArgNo();
5114 
5115     // Make sure the associated call site argument has the same type at all call
5116     // sites and it is an allocation we know is safe to privatize, for now that
5117     // means we only allow alloca instructions.
5118     // TODO: We can additionally analyze the accesses in the callee to  create
5119     //       the type from that information instead. That is a little more
5120     //       involved and will be done in a follow up patch.
5121     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5122       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5123       // Check if a coresponding argument was found or if it is one not
5124       // associated (which can happen for callback calls).
5125       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5126         return false;
5127 
5128       // Check that all call sites agree on a type.
5129       auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
5130       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5131 
5132       LLVM_DEBUG({
5133         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5134         if (CSTy.hasValue() && CSTy.getValue())
5135           CSTy.getValue()->print(dbgs());
5136         else if (CSTy.hasValue())
5137           dbgs() << "<nullptr>";
5138         else
5139           dbgs() << "<none>";
5140       });
5141 
5142       Ty = combineTypes(Ty, CSTy);
5143 
5144       LLVM_DEBUG({
5145         dbgs() << " : New Type: ";
5146         if (Ty.hasValue() && Ty.getValue())
5147           Ty.getValue()->print(dbgs());
5148         else if (Ty.hasValue())
5149           dbgs() << "<nullptr>";
5150         else
5151           dbgs() << "<none>";
5152         dbgs() << "\n";
5153       });
5154 
5155       return !Ty.hasValue() || Ty.getValue();
5156     };
5157 
5158     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5159       return nullptr;
5160     return Ty;
5161   }
5162 
5163   /// See AbstractAttribute::updateImpl(...).
5164   ChangeStatus updateImpl(Attributor &A) override {
5165     PrivatizableType = identifyPrivatizableType(A);
5166     if (!PrivatizableType.hasValue())
5167       return ChangeStatus::UNCHANGED;
5168     if (!PrivatizableType.getValue())
5169       return indicatePessimisticFixpoint();
5170 
5171     // The dependence is optional so we don't give up once we give up on the
5172     // alignment.
5173     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5174                         /* TrackDependence */ true, DepClassTy::OPTIONAL);
5175 
5176     // Avoid arguments with padding for now.
5177     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5178         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5179                                                 A.getInfoCache().getDL())) {
5180       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5181       return indicatePessimisticFixpoint();
5182     }
5183 
5184     // Verify callee and caller agree on how the promoted argument would be
5185     // passed.
5186     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5187     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5188     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5189     Function &Fn = *getIRPosition().getAnchorScope();
5190     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5191     ArgsToPromote.insert(getAssociatedArgument());
5192     const auto *TTI =
5193         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5194     if (!TTI ||
5195         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5196             Fn, *TTI, ArgsToPromote, Dummy) ||
5197         ArgsToPromote.empty()) {
5198       LLVM_DEBUG(
5199           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5200                  << Fn.getName() << "\n");
5201       return indicatePessimisticFixpoint();
5202     }
5203 
5204     // Collect the types that will replace the privatizable type in the function
5205     // signature.
5206     SmallVector<Type *, 16> ReplacementTypes;
5207     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5208 
5209     // Register a rewrite of the argument.
5210     Argument *Arg = getAssociatedArgument();
5211     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5212       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5213       return indicatePessimisticFixpoint();
5214     }
5215 
5216     unsigned ArgNo = Arg->getArgNo();
5217 
5218     // Helper to check if for the given call site the associated argument is
5219     // passed to a callback where the privatization would be different.
5220     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5221       SmallVector<const Use *, 4> CallbackUses;
5222       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5223       for (const Use *U : CallbackUses) {
5224         AbstractCallSite CBACS(U);
5225         assert(CBACS && CBACS.isCallbackCall());
5226         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5227           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5228 
5229           LLVM_DEBUG({
5230             dbgs()
5231                 << "[AAPrivatizablePtr] Argument " << *Arg
5232                 << "check if can be privatized in the context of its parent ("
5233                 << Arg->getParent()->getName()
5234                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5235                    "callback ("
5236                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5237                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5238                 << CBACS.getCallArgOperand(CBArg) << " vs "
5239                 << CB.getArgOperand(ArgNo) << "\n"
5240                 << "[AAPrivatizablePtr] " << CBArg << " : "
5241                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5242           });
5243 
5244           if (CBArgNo != int(ArgNo))
5245             continue;
5246           const auto &CBArgPrivAA =
5247               A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
5248           if (CBArgPrivAA.isValidState()) {
5249             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5250             if (!CBArgPrivTy.hasValue())
5251               continue;
5252             if (CBArgPrivTy.getValue() == PrivatizableType)
5253               continue;
5254           }
5255 
5256           LLVM_DEBUG({
5257             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5258                    << " cannot be privatized in the context of its parent ("
5259                    << Arg->getParent()->getName()
5260                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5261                       "callback ("
5262                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5263                    << ").\n[AAPrivatizablePtr] for which the argument "
5264                       "privatization is not compatible.\n";
5265           });
5266           return false;
5267         }
5268       }
5269       return true;
5270     };
5271 
5272     // Helper to check if for the given call site the associated argument is
5273     // passed to a direct call where the privatization would be different.
5274     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5275       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5276       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5277       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5278              "Expected a direct call operand for callback call operand");
5279 
5280       LLVM_DEBUG({
5281         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5282                << " check if be privatized in the context of its parent ("
5283                << Arg->getParent()->getName()
5284                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5285                   "direct call of ("
5286                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5287                << ").\n";
5288       });
5289 
5290       Function *DCCallee = DC->getCalledFunction();
5291       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5292         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5293             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
5294         if (DCArgPrivAA.isValidState()) {
5295           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5296           if (!DCArgPrivTy.hasValue())
5297             return true;
5298           if (DCArgPrivTy.getValue() == PrivatizableType)
5299             return true;
5300         }
5301       }
5302 
5303       LLVM_DEBUG({
5304         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5305                << " cannot be privatized in the context of its parent ("
5306                << Arg->getParent()->getName()
5307                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5308                   "direct call of ("
5309                << ACS.getInstruction()->getCalledFunction()->getName()
5310                << ").\n[AAPrivatizablePtr] for which the argument "
5311                   "privatization is not compatible.\n";
5312       });
5313       return false;
5314     };
5315 
5316     // Helper to check if the associated argument is used at the given abstract
5317     // call site in a way that is incompatible with the privatization assumed
5318     // here.
5319     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5320       if (ACS.isDirectCall())
5321         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5322       if (ACS.isCallbackCall())
5323         return IsCompatiblePrivArgOfDirectCS(ACS);
5324       return false;
5325     };
5326 
5327     bool AllCallSitesKnown;
5328     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5329                                 AllCallSitesKnown))
5330       return indicatePessimisticFixpoint();
5331 
5332     return ChangeStatus::UNCHANGED;
5333   }
5334 
5335   /// Given a type to private \p PrivType, collect the constituates (which are
5336   /// used) in \p ReplacementTypes.
5337   static void
5338   identifyReplacementTypes(Type *PrivType,
5339                            SmallVectorImpl<Type *> &ReplacementTypes) {
5340     // TODO: For now we expand the privatization type to the fullest which can
5341     //       lead to dead arguments that need to be removed later.
5342     assert(PrivType && "Expected privatizable type!");
5343 
5344     // Traverse the type, extract constituate types on the outermost level.
5345     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5346       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5347         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5348     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5349       ReplacementTypes.append(PrivArrayType->getNumElements(),
5350                               PrivArrayType->getElementType());
5351     } else {
5352       ReplacementTypes.push_back(PrivType);
5353     }
5354   }
5355 
5356   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5357   /// The values needed are taken from the arguments of \p F starting at
5358   /// position \p ArgNo.
5359   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5360                                    unsigned ArgNo, Instruction &IP) {
5361     assert(PrivType && "Expected privatizable type!");
5362 
5363     IRBuilder<NoFolder> IRB(&IP);
5364     const DataLayout &DL = F.getParent()->getDataLayout();
5365 
5366     // Traverse the type, build GEPs and stores.
5367     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5368       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5369       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5370         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5371         Value *Ptr = constructPointer(
5372             PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
5373         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5374       }
5375     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5376       Type *PointeePtrTy = PrivArrayType->getElementType()->getPointerTo();
5377       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeePtrTy);
5378       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5379         Value *Ptr =
5380             constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
5381         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5382       }
5383     } else {
5384       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5385     }
5386   }
5387 
5388   /// Extract values from \p Base according to the type \p PrivType at the
5389   /// call position \p ACS. The values are appended to \p ReplacementValues.
5390   void createReplacementValues(Align Alignment, Type *PrivType,
5391                                AbstractCallSite ACS, Value *Base,
5392                                SmallVectorImpl<Value *> &ReplacementValues) {
5393     assert(Base && "Expected base value!");
5394     assert(PrivType && "Expected privatizable type!");
5395     Instruction *IP = ACS.getInstruction();
5396 
5397     IRBuilder<NoFolder> IRB(IP);
5398     const DataLayout &DL = IP->getModule()->getDataLayout();
5399 
5400     if (Base->getType()->getPointerElementType() != PrivType)
5401       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5402                                                  "", ACS.getInstruction());
5403 
5404     // Traverse the type, build GEPs and loads.
5405     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5406       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5407       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5408         Type *PointeeTy = PrivStructType->getElementType(u);
5409         Value *Ptr =
5410             constructPointer(PointeeTy->getPointerTo(), Base,
5411                              PrivStructLayout->getElementOffset(u), IRB, DL);
5412         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5413         L->setAlignment(Alignment);
5414         ReplacementValues.push_back(L);
5415       }
5416     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5417       Type *PointeeTy = PrivArrayType->getElementType();
5418       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5419       Type *PointeePtrTy = PointeeTy->getPointerTo();
5420       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5421         Value *Ptr =
5422             constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
5423         LoadInst *L = new LoadInst(PointeePtrTy, Ptr, "", IP);
5424         L->setAlignment(Alignment);
5425         ReplacementValues.push_back(L);
5426       }
5427     } else {
5428       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5429       L->setAlignment(Alignment);
5430       ReplacementValues.push_back(L);
5431     }
5432   }
5433 
5434   /// See AbstractAttribute::manifest(...)
5435   ChangeStatus manifest(Attributor &A) override {
5436     if (!PrivatizableType.hasValue())
5437       return ChangeStatus::UNCHANGED;
5438     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5439 
5440     // Collect all tail calls in the function as we cannot allow new allocas to
5441     // escape into tail recursion.
5442     // TODO: Be smarter about new allocas escaping into tail calls.
5443     SmallVector<CallInst *, 16> TailCalls;
5444     if (!A.checkForAllInstructions(
5445             [&](Instruction &I) {
5446               CallInst &CI = cast<CallInst>(I);
5447               if (CI.isTailCall())
5448                 TailCalls.push_back(&CI);
5449               return true;
5450             },
5451             *this, {Instruction::Call}))
5452       return ChangeStatus::UNCHANGED;
5453 
5454     Argument *Arg = getAssociatedArgument();
5455     // Query AAAlign attribute for alignment of associated argument to
5456     // determine the best alignment of loads.
5457     const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg));
5458 
5459     // Callback to repair the associated function. A new alloca is placed at the
5460     // beginning and initialized with the values passed through arguments. The
5461     // new alloca replaces the use of the old pointer argument.
5462     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5463         [=](const Attributor::ArgumentReplacementInfo &ARI,
5464             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5465           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5466           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5467           auto *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5468                                     Arg->getName() + ".priv", IP);
5469           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5470                                ArgIt->getArgNo(), *IP);
5471           Arg->replaceAllUsesWith(AI);
5472 
5473           for (CallInst *CI : TailCalls)
5474             CI->setTailCall(false);
5475         };
5476 
5477     // Callback to repair a call site of the associated function. The elements
5478     // of the privatizable type are loaded prior to the call and passed to the
5479     // new function version.
5480     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5481         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5482                       AbstractCallSite ACS,
5483                       SmallVectorImpl<Value *> &NewArgOperands) {
5484           // When no alignment is specified for the load instruction,
5485           // natural alignment is assumed.
5486           createReplacementValues(
5487               assumeAligned(AlignAA.getAssumedAlign()),
5488               PrivatizableType.getValue(), ACS,
5489               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5490               NewArgOperands);
5491         };
5492 
5493     // Collect the types that will replace the privatizable type in the function
5494     // signature.
5495     SmallVector<Type *, 16> ReplacementTypes;
5496     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5497 
5498     // Register a rewrite of the argument.
5499     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5500                                            std::move(FnRepairCB),
5501                                            std::move(ACSRepairCB)))
5502       return ChangeStatus::CHANGED;
5503     return ChangeStatus::UNCHANGED;
5504   }
5505 
5506   /// See AbstractAttribute::trackStatistics()
5507   void trackStatistics() const override {
5508     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5509   }
5510 };
5511 
5512 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5513   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5514       : AAPrivatizablePtrImpl(IRP, A) {}
5515 
5516   /// See AbstractAttribute::initialize(...).
5517   virtual void initialize(Attributor &A) override {
5518     // TODO: We can privatize more than arguments.
5519     indicatePessimisticFixpoint();
5520   }
5521 
5522   ChangeStatus updateImpl(Attributor &A) override {
5523     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5524                      "updateImpl will not be called");
5525   }
5526 
5527   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5528   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5529     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5530     if (!Obj) {
5531       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5532       return nullptr;
5533     }
5534 
5535     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5536       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5537         if (CI->isOne())
5538           return Obj->getType()->getPointerElementType();
5539     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5540       auto &PrivArgAA =
5541           A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
5542       if (PrivArgAA.isAssumedPrivatizablePtr())
5543         return Obj->getType()->getPointerElementType();
5544     }
5545 
5546     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5547                          "alloca nor privatizable argument: "
5548                       << *Obj << "!\n");
5549     return nullptr;
5550   }
5551 
5552   /// See AbstractAttribute::trackStatistics()
5553   void trackStatistics() const override {
5554     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5555   }
5556 };
5557 
5558 struct AAPrivatizablePtrCallSiteArgument final
5559     : public AAPrivatizablePtrFloating {
5560   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5561       : AAPrivatizablePtrFloating(IRP, A) {}
5562 
5563   /// See AbstractAttribute::initialize(...).
5564   void initialize(Attributor &A) override {
5565     if (getIRPosition().hasAttr(Attribute::ByVal))
5566       indicateOptimisticFixpoint();
5567   }
5568 
5569   /// See AbstractAttribute::updateImpl(...).
5570   ChangeStatus updateImpl(Attributor &A) override {
5571     PrivatizableType = identifyPrivatizableType(A);
5572     if (!PrivatizableType.hasValue())
5573       return ChangeStatus::UNCHANGED;
5574     if (!PrivatizableType.getValue())
5575       return indicatePessimisticFixpoint();
5576 
5577     const IRPosition &IRP = getIRPosition();
5578     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
5579     if (!NoCaptureAA.isAssumedNoCapture()) {
5580       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5581       return indicatePessimisticFixpoint();
5582     }
5583 
5584     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
5585     if (!NoAliasAA.isAssumedNoAlias()) {
5586       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5587       return indicatePessimisticFixpoint();
5588     }
5589 
5590     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
5591     if (!MemBehaviorAA.isAssumedReadOnly()) {
5592       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5593       return indicatePessimisticFixpoint();
5594     }
5595 
5596     return ChangeStatus::UNCHANGED;
5597   }
5598 
5599   /// See AbstractAttribute::trackStatistics()
5600   void trackStatistics() const override {
5601     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5602   }
5603 };
5604 
5605 struct AAPrivatizablePtrCallSiteReturned final
5606     : public AAPrivatizablePtrFloating {
5607   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5608       : AAPrivatizablePtrFloating(IRP, A) {}
5609 
5610   /// See AbstractAttribute::initialize(...).
5611   void initialize(Attributor &A) override {
5612     // TODO: We can privatize more than arguments.
5613     indicatePessimisticFixpoint();
5614   }
5615 
5616   /// See AbstractAttribute::trackStatistics()
5617   void trackStatistics() const override {
5618     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5619   }
5620 };
5621 
5622 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5623   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5624       : AAPrivatizablePtrFloating(IRP, A) {}
5625 
5626   /// See AbstractAttribute::initialize(...).
5627   void initialize(Attributor &A) override {
5628     // TODO: We can privatize more than arguments.
5629     indicatePessimisticFixpoint();
5630   }
5631 
5632   /// See AbstractAttribute::trackStatistics()
5633   void trackStatistics() const override {
5634     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5635   }
5636 };
5637 
5638 /// -------------------- Memory Behavior Attributes ----------------------------
5639 /// Includes read-none, read-only, and write-only.
5640 /// ----------------------------------------------------------------------------
5641 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5642   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5643       : AAMemoryBehavior(IRP, A) {}
5644 
5645   /// See AbstractAttribute::initialize(...).
5646   void initialize(Attributor &A) override {
5647     intersectAssumedBits(BEST_STATE);
5648     getKnownStateFromValue(getIRPosition(), getState());
5649     IRAttribute::initialize(A);
5650   }
5651 
5652   /// Return the memory behavior information encoded in the IR for \p IRP.
5653   static void getKnownStateFromValue(const IRPosition &IRP,
5654                                      BitIntegerState &State,
5655                                      bool IgnoreSubsumingPositions = false) {
5656     SmallVector<Attribute, 2> Attrs;
5657     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5658     for (const Attribute &Attr : Attrs) {
5659       switch (Attr.getKindAsEnum()) {
5660       case Attribute::ReadNone:
5661         State.addKnownBits(NO_ACCESSES);
5662         break;
5663       case Attribute::ReadOnly:
5664         State.addKnownBits(NO_WRITES);
5665         break;
5666       case Attribute::WriteOnly:
5667         State.addKnownBits(NO_READS);
5668         break;
5669       default:
5670         llvm_unreachable("Unexpected attribute!");
5671       }
5672     }
5673 
5674     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5675       if (!I->mayReadFromMemory())
5676         State.addKnownBits(NO_READS);
5677       if (!I->mayWriteToMemory())
5678         State.addKnownBits(NO_WRITES);
5679     }
5680   }
5681 
5682   /// See AbstractAttribute::getDeducedAttributes(...).
5683   void getDeducedAttributes(LLVMContext &Ctx,
5684                             SmallVectorImpl<Attribute> &Attrs) const override {
5685     assert(Attrs.size() == 0);
5686     if (isAssumedReadNone())
5687       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5688     else if (isAssumedReadOnly())
5689       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5690     else if (isAssumedWriteOnly())
5691       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5692     assert(Attrs.size() <= 1);
5693   }
5694 
5695   /// See AbstractAttribute::manifest(...).
5696   ChangeStatus manifest(Attributor &A) override {
5697     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5698       return ChangeStatus::UNCHANGED;
5699 
5700     const IRPosition &IRP = getIRPosition();
5701 
5702     // Check if we would improve the existing attributes first.
5703     SmallVector<Attribute, 4> DeducedAttrs;
5704     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5705     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5706           return IRP.hasAttr(Attr.getKindAsEnum(),
5707                              /* IgnoreSubsumingPositions */ true);
5708         }))
5709       return ChangeStatus::UNCHANGED;
5710 
5711     // Clear existing attributes.
5712     IRP.removeAttrs(AttrKinds);
5713 
5714     // Use the generic manifest method.
5715     return IRAttribute::manifest(A);
5716   }
5717 
5718   /// See AbstractState::getAsStr().
5719   const std::string getAsStr() const override {
5720     if (isAssumedReadNone())
5721       return "readnone";
5722     if (isAssumedReadOnly())
5723       return "readonly";
5724     if (isAssumedWriteOnly())
5725       return "writeonly";
5726     return "may-read/write";
5727   }
5728 
5729   /// The set of IR attributes AAMemoryBehavior deals with.
5730   static const Attribute::AttrKind AttrKinds[3];
5731 };
5732 
5733 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5734     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5735 
5736 /// Memory behavior attribute for a floating value.
5737 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5738   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5739       : AAMemoryBehaviorImpl(IRP, A) {}
5740 
5741   /// See AbstractAttribute::initialize(...).
5742   void initialize(Attributor &A) override {
5743     AAMemoryBehaviorImpl::initialize(A);
5744     // Initialize the use vector with all direct uses of the associated value.
5745     for (const Use &U : getAssociatedValue().uses())
5746       Uses.insert(&U);
5747   }
5748 
5749   /// See AbstractAttribute::updateImpl(...).
5750   ChangeStatus updateImpl(Attributor &A) override;
5751 
5752   /// See AbstractAttribute::trackStatistics()
5753   void trackStatistics() const override {
5754     if (isAssumedReadNone())
5755       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5756     else if (isAssumedReadOnly())
5757       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5758     else if (isAssumedWriteOnly())
5759       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5760   }
5761 
5762 private:
5763   /// Return true if users of \p UserI might access the underlying
5764   /// variable/location described by \p U and should therefore be analyzed.
5765   bool followUsersOfUseIn(Attributor &A, const Use *U,
5766                           const Instruction *UserI);
5767 
5768   /// Update the state according to the effect of use \p U in \p UserI.
5769   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5770 
5771 protected:
5772   /// Container for (transitive) uses of the associated argument.
5773   SetVector<const Use *> Uses;
5774 };
5775 
5776 /// Memory behavior attribute for function argument.
5777 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
5778   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
5779       : AAMemoryBehaviorFloating(IRP, A) {}
5780 
5781   /// See AbstractAttribute::initialize(...).
5782   void initialize(Attributor &A) override {
5783     intersectAssumedBits(BEST_STATE);
5784     const IRPosition &IRP = getIRPosition();
5785     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
5786     // can query it when we use has/getAttr. That would allow us to reuse the
5787     // initialize of the base class here.
5788     bool HasByVal =
5789         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
5790     getKnownStateFromValue(IRP, getState(),
5791                            /* IgnoreSubsumingPositions */ HasByVal);
5792 
5793     // Initialize the use vector with all direct uses of the associated value.
5794     Argument *Arg = getAssociatedArgument();
5795     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
5796       indicatePessimisticFixpoint();
5797     } else {
5798       // Initialize the use vector with all direct uses of the associated value.
5799       for (const Use &U : Arg->uses())
5800         Uses.insert(&U);
5801     }
5802   }
5803 
5804   ChangeStatus manifest(Attributor &A) override {
5805     // TODO: Pointer arguments are not supported on vectors of pointers yet.
5806     if (!getAssociatedValue().getType()->isPointerTy())
5807       return ChangeStatus::UNCHANGED;
5808 
5809     // TODO: From readattrs.ll: "inalloca parameters are always
5810     //                           considered written"
5811     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
5812       removeKnownBits(NO_WRITES);
5813       removeAssumedBits(NO_WRITES);
5814     }
5815     return AAMemoryBehaviorFloating::manifest(A);
5816   }
5817 
5818   /// See AbstractAttribute::trackStatistics()
5819   void trackStatistics() const override {
5820     if (isAssumedReadNone())
5821       STATS_DECLTRACK_ARG_ATTR(readnone)
5822     else if (isAssumedReadOnly())
5823       STATS_DECLTRACK_ARG_ATTR(readonly)
5824     else if (isAssumedWriteOnly())
5825       STATS_DECLTRACK_ARG_ATTR(writeonly)
5826   }
5827 };
5828 
5829 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
5830   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
5831       : AAMemoryBehaviorArgument(IRP, A) {}
5832 
5833   /// See AbstractAttribute::initialize(...).
5834   void initialize(Attributor &A) override {
5835     if (Argument *Arg = getAssociatedArgument()) {
5836       if (Arg->hasByValAttr()) {
5837         addKnownBits(NO_WRITES);
5838         removeKnownBits(NO_READS);
5839         removeAssumedBits(NO_READS);
5840       }
5841     }
5842     AAMemoryBehaviorArgument::initialize(A);
5843   }
5844 
5845   /// See AbstractAttribute::updateImpl(...).
5846   ChangeStatus updateImpl(Attributor &A) override {
5847     // TODO: Once we have call site specific value information we can provide
5848     //       call site specific liveness liveness information and then it makes
5849     //       sense to specialize attributes for call sites arguments instead of
5850     //       redirecting requests to the callee argument.
5851     Argument *Arg = getAssociatedArgument();
5852     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5853     auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
5854     return clampStateAndIndicateChange(
5855         getState(),
5856         static_cast<const AAMemoryBehavior::StateType &>(ArgAA.getState()));
5857   }
5858 
5859   /// See AbstractAttribute::trackStatistics()
5860   void trackStatistics() const override {
5861     if (isAssumedReadNone())
5862       STATS_DECLTRACK_CSARG_ATTR(readnone)
5863     else if (isAssumedReadOnly())
5864       STATS_DECLTRACK_CSARG_ATTR(readonly)
5865     else if (isAssumedWriteOnly())
5866       STATS_DECLTRACK_CSARG_ATTR(writeonly)
5867   }
5868 };
5869 
5870 /// Memory behavior attribute for a call site return position.
5871 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
5872   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
5873       : AAMemoryBehaviorFloating(IRP, A) {}
5874 
5875   /// See AbstractAttribute::manifest(...).
5876   ChangeStatus manifest(Attributor &A) override {
5877     // We do not annotate returned values.
5878     return ChangeStatus::UNCHANGED;
5879   }
5880 
5881   /// See AbstractAttribute::trackStatistics()
5882   void trackStatistics() const override {}
5883 };
5884 
5885 /// An AA to represent the memory behavior function attributes.
5886 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
5887   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
5888       : AAMemoryBehaviorImpl(IRP, A) {}
5889 
5890   /// See AbstractAttribute::updateImpl(Attributor &A).
5891   virtual ChangeStatus updateImpl(Attributor &A) override;
5892 
5893   /// See AbstractAttribute::manifest(...).
5894   ChangeStatus manifest(Attributor &A) override {
5895     Function &F = cast<Function>(getAnchorValue());
5896     if (isAssumedReadNone()) {
5897       F.removeFnAttr(Attribute::ArgMemOnly);
5898       F.removeFnAttr(Attribute::InaccessibleMemOnly);
5899       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
5900     }
5901     return AAMemoryBehaviorImpl::manifest(A);
5902   }
5903 
5904   /// See AbstractAttribute::trackStatistics()
5905   void trackStatistics() const override {
5906     if (isAssumedReadNone())
5907       STATS_DECLTRACK_FN_ATTR(readnone)
5908     else if (isAssumedReadOnly())
5909       STATS_DECLTRACK_FN_ATTR(readonly)
5910     else if (isAssumedWriteOnly())
5911       STATS_DECLTRACK_FN_ATTR(writeonly)
5912   }
5913 };
5914 
5915 /// AAMemoryBehavior attribute for call sites.
5916 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
5917   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
5918       : AAMemoryBehaviorImpl(IRP, A) {}
5919 
5920   /// See AbstractAttribute::initialize(...).
5921   void initialize(Attributor &A) override {
5922     AAMemoryBehaviorImpl::initialize(A);
5923     Function *F = getAssociatedFunction();
5924     if (!F || !A.isFunctionIPOAmendable(*F)) {
5925       indicatePessimisticFixpoint();
5926       return;
5927     }
5928   }
5929 
5930   /// See AbstractAttribute::updateImpl(...).
5931   ChangeStatus updateImpl(Attributor &A) override {
5932     // TODO: Once we have call site specific value information we can provide
5933     //       call site specific liveness liveness information and then it makes
5934     //       sense to specialize attributes for call sites arguments instead of
5935     //       redirecting requests to the callee argument.
5936     Function *F = getAssociatedFunction();
5937     const IRPosition &FnPos = IRPosition::function(*F);
5938     auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
5939     return clampStateAndIndicateChange(
5940         getState(),
5941         static_cast<const AAMemoryBehavior::StateType &>(FnAA.getState()));
5942   }
5943 
5944   /// See AbstractAttribute::trackStatistics()
5945   void trackStatistics() const override {
5946     if (isAssumedReadNone())
5947       STATS_DECLTRACK_CS_ATTR(readnone)
5948     else if (isAssumedReadOnly())
5949       STATS_DECLTRACK_CS_ATTR(readonly)
5950     else if (isAssumedWriteOnly())
5951       STATS_DECLTRACK_CS_ATTR(writeonly)
5952   }
5953 };
5954 
5955 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
5956 
5957   // The current assumed state used to determine a change.
5958   auto AssumedState = getAssumed();
5959 
5960   auto CheckRWInst = [&](Instruction &I) {
5961     // If the instruction has an own memory behavior state, use it to restrict
5962     // the local state. No further analysis is required as the other memory
5963     // state is as optimistic as it gets.
5964     if (const auto *CB = dyn_cast<CallBase>(&I)) {
5965       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
5966           *this, IRPosition::callsite_function(*CB));
5967       intersectAssumedBits(MemBehaviorAA.getAssumed());
5968       return !isAtFixpoint();
5969     }
5970 
5971     // Remove access kind modifiers if necessary.
5972     if (I.mayReadFromMemory())
5973       removeAssumedBits(NO_READS);
5974     if (I.mayWriteToMemory())
5975       removeAssumedBits(NO_WRITES);
5976     return !isAtFixpoint();
5977   };
5978 
5979   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
5980     return indicatePessimisticFixpoint();
5981 
5982   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
5983                                         : ChangeStatus::UNCHANGED;
5984 }
5985 
5986 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
5987 
5988   const IRPosition &IRP = getIRPosition();
5989   const IRPosition &FnPos = IRPosition::function_scope(IRP);
5990   AAMemoryBehavior::StateType &S = getState();
5991 
5992   // First, check the function scope. We take the known information and we avoid
5993   // work if the assumed information implies the current assumed information for
5994   // this attribute. This is a valid for all but byval arguments.
5995   Argument *Arg = IRP.getAssociatedArgument();
5996   AAMemoryBehavior::base_t FnMemAssumedState =
5997       AAMemoryBehavior::StateType::getWorstState();
5998   if (!Arg || !Arg->hasByValAttr()) {
5999     const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
6000         *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6001     FnMemAssumedState = FnMemAA.getAssumed();
6002     S.addKnownBits(FnMemAA.getKnown());
6003     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6004       return ChangeStatus::UNCHANGED;
6005   }
6006 
6007   // Make sure the value is not captured (except through "return"), if
6008   // it is, any information derived would be irrelevant anyway as we cannot
6009   // check the potential aliases introduced by the capture. However, no need
6010   // to fall back to anythign less optimistic than the function state.
6011   const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6012       *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6013   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6014     S.intersectAssumedBits(FnMemAssumedState);
6015     return ChangeStatus::CHANGED;
6016   }
6017 
6018   // The current assumed state used to determine a change.
6019   auto AssumedState = S.getAssumed();
6020 
6021   // Liveness information to exclude dead users.
6022   // TODO: Take the FnPos once we have call site specific liveness information.
6023   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6024       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6025       /* TrackDependence */ false);
6026 
6027   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6028   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6029     const Use *U = Uses[i];
6030     Instruction *UserI = cast<Instruction>(U->getUser());
6031     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6032                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6033                       << "]\n");
6034     if (A.isAssumedDead(*U, this, &LivenessAA))
6035       continue;
6036 
6037     // Droppable users, e.g., llvm::assume does not actually perform any action.
6038     if (UserI->isDroppable())
6039       continue;
6040 
6041     // Check if the users of UserI should also be visited.
6042     if (followUsersOfUseIn(A, U, UserI))
6043       for (const Use &UserIUse : UserI->uses())
6044         Uses.insert(&UserIUse);
6045 
6046     // If UserI might touch memory we analyze the use in detail.
6047     if (UserI->mayReadOrWriteMemory())
6048       analyzeUseIn(A, U, UserI);
6049   }
6050 
6051   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6052                                         : ChangeStatus::UNCHANGED;
6053 }
6054 
6055 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6056                                                   const Instruction *UserI) {
6057   // The loaded value is unrelated to the pointer argument, no need to
6058   // follow the users of the load.
6059   if (isa<LoadInst>(UserI))
6060     return false;
6061 
6062   // By default we follow all uses assuming UserI might leak information on U,
6063   // we have special handling for call sites operands though.
6064   const auto *CB = dyn_cast<CallBase>(UserI);
6065   if (!CB || !CB->isArgOperand(U))
6066     return true;
6067 
6068   // If the use is a call argument known not to be captured, the users of
6069   // the call do not need to be visited because they have to be unrelated to
6070   // the input. Note that this check is not trivial even though we disallow
6071   // general capturing of the underlying argument. The reason is that the
6072   // call might the argument "through return", which we allow and for which we
6073   // need to check call users.
6074   if (U->get()->getType()->isPointerTy()) {
6075     unsigned ArgNo = CB->getArgOperandNo(U);
6076     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6077         *this, IRPosition::callsite_argument(*CB, ArgNo),
6078         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6079     return !ArgNoCaptureAA.isAssumedNoCapture();
6080   }
6081 
6082   return true;
6083 }
6084 
6085 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6086                                             const Instruction *UserI) {
6087   assert(UserI->mayReadOrWriteMemory());
6088 
6089   switch (UserI->getOpcode()) {
6090   default:
6091     // TODO: Handle all atomics and other side-effect operations we know of.
6092     break;
6093   case Instruction::Load:
6094     // Loads cause the NO_READS property to disappear.
6095     removeAssumedBits(NO_READS);
6096     return;
6097 
6098   case Instruction::Store:
6099     // Stores cause the NO_WRITES property to disappear if the use is the
6100     // pointer operand. Note that we do assume that capturing was taken care of
6101     // somewhere else.
6102     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6103       removeAssumedBits(NO_WRITES);
6104     return;
6105 
6106   case Instruction::Call:
6107   case Instruction::CallBr:
6108   case Instruction::Invoke: {
6109     // For call sites we look at the argument memory behavior attribute (this
6110     // could be recursive!) in order to restrict our own state.
6111     const auto *CB = cast<CallBase>(UserI);
6112 
6113     // Give up on operand bundles.
6114     if (CB->isBundleOperand(U)) {
6115       indicatePessimisticFixpoint();
6116       return;
6117     }
6118 
6119     // Calling a function does read the function pointer, maybe write it if the
6120     // function is self-modifying.
6121     if (CB->isCallee(U)) {
6122       removeAssumedBits(NO_READS);
6123       break;
6124     }
6125 
6126     // Adjust the possible access behavior based on the information on the
6127     // argument.
6128     IRPosition Pos;
6129     if (U->get()->getType()->isPointerTy())
6130       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6131     else
6132       Pos = IRPosition::callsite_function(*CB);
6133     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6134         *this, Pos,
6135         /* TrackDependence */ true, DepClassTy::OPTIONAL);
6136     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6137     // and at least "known".
6138     intersectAssumedBits(MemBehaviorAA.getAssumed());
6139     return;
6140   }
6141   };
6142 
6143   // Generally, look at the "may-properties" and adjust the assumed state if we
6144   // did not trigger special handling before.
6145   if (UserI->mayReadFromMemory())
6146     removeAssumedBits(NO_READS);
6147   if (UserI->mayWriteToMemory())
6148     removeAssumedBits(NO_WRITES);
6149 }
6150 
6151 } // namespace
6152 
6153 /// -------------------- Memory Locations Attributes ---------------------------
6154 /// Includes read-none, argmemonly, inaccessiblememonly,
6155 /// inaccessiblememorargmemonly
6156 /// ----------------------------------------------------------------------------
6157 
6158 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6159     AAMemoryLocation::MemoryLocationsKind MLK) {
6160   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6161     return "all memory";
6162   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6163     return "no memory";
6164   std::string S = "memory:";
6165   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6166     S += "stack,";
6167   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6168     S += "constant,";
6169   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6170     S += "internal global,";
6171   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6172     S += "external global,";
6173   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6174     S += "argument,";
6175   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6176     S += "inaccessible,";
6177   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6178     S += "malloced,";
6179   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6180     S += "unknown,";
6181   S.pop_back();
6182   return S;
6183 }
6184 
6185 namespace {
6186 struct AAMemoryLocationImpl : public AAMemoryLocation {
6187 
6188   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6189       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6190     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6191       AccessKind2Accesses[u] = nullptr;
6192   }
6193 
6194   ~AAMemoryLocationImpl() {
6195     // The AccessSets are allocated via a BumpPtrAllocator, we call
6196     // the destructor manually.
6197     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6198       if (AccessKind2Accesses[u])
6199         AccessKind2Accesses[u]->~AccessSet();
6200   }
6201 
6202   /// See AbstractAttribute::initialize(...).
6203   void initialize(Attributor &A) override {
6204     intersectAssumedBits(BEST_STATE);
6205     getKnownStateFromValue(A, getIRPosition(), getState());
6206     IRAttribute::initialize(A);
6207   }
6208 
6209   /// Return the memory behavior information encoded in the IR for \p IRP.
6210   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6211                                      BitIntegerState &State,
6212                                      bool IgnoreSubsumingPositions = false) {
6213     // For internal functions we ignore `argmemonly` and
6214     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6215     // constant propagation. It is unclear if this is the best way but it is
6216     // unlikely this will cause real performance problems. If we are deriving
6217     // attributes for the anchor function we even remove the attribute in
6218     // addition to ignoring it.
6219     bool UseArgMemOnly = true;
6220     Function *AnchorFn = IRP.getAnchorScope();
6221     if (AnchorFn && A.isRunOn(*AnchorFn))
6222       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6223 
6224     SmallVector<Attribute, 2> Attrs;
6225     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6226     for (const Attribute &Attr : Attrs) {
6227       switch (Attr.getKindAsEnum()) {
6228       case Attribute::ReadNone:
6229         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6230         break;
6231       case Attribute::InaccessibleMemOnly:
6232         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6233         break;
6234       case Attribute::ArgMemOnly:
6235         if (UseArgMemOnly)
6236           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6237         else
6238           IRP.removeAttrs({Attribute::ArgMemOnly});
6239         break;
6240       case Attribute::InaccessibleMemOrArgMemOnly:
6241         if (UseArgMemOnly)
6242           State.addKnownBits(inverseLocation(
6243               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6244         else
6245           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6246         break;
6247       default:
6248         llvm_unreachable("Unexpected attribute!");
6249       }
6250     }
6251   }
6252 
6253   /// See AbstractAttribute::getDeducedAttributes(...).
6254   void getDeducedAttributes(LLVMContext &Ctx,
6255                             SmallVectorImpl<Attribute> &Attrs) const override {
6256     assert(Attrs.size() == 0);
6257     if (isAssumedReadNone()) {
6258       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6259     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6260       if (isAssumedInaccessibleMemOnly())
6261         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6262       else if (isAssumedArgMemOnly())
6263         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6264       else if (isAssumedInaccessibleOrArgMemOnly())
6265         Attrs.push_back(
6266             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6267     }
6268     assert(Attrs.size() <= 1);
6269   }
6270 
6271   /// See AbstractAttribute::manifest(...).
6272   ChangeStatus manifest(Attributor &A) override {
6273     const IRPosition &IRP = getIRPosition();
6274 
6275     // Check if we would improve the existing attributes first.
6276     SmallVector<Attribute, 4> DeducedAttrs;
6277     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6278     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6279           return IRP.hasAttr(Attr.getKindAsEnum(),
6280                              /* IgnoreSubsumingPositions */ true);
6281         }))
6282       return ChangeStatus::UNCHANGED;
6283 
6284     // Clear existing attributes.
6285     IRP.removeAttrs(AttrKinds);
6286     if (isAssumedReadNone())
6287       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6288 
6289     // Use the generic manifest method.
6290     return IRAttribute::manifest(A);
6291   }
6292 
6293   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6294   bool checkForAllAccessesToMemoryKind(
6295       function_ref<bool(const Instruction *, const Value *, AccessKind,
6296                         MemoryLocationsKind)>
6297           Pred,
6298       MemoryLocationsKind RequestedMLK) const override {
6299     if (!isValidState())
6300       return false;
6301 
6302     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6303     if (AssumedMLK == NO_LOCATIONS)
6304       return true;
6305 
6306     unsigned Idx = 0;
6307     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6308          CurMLK *= 2, ++Idx) {
6309       if (CurMLK & RequestedMLK)
6310         continue;
6311 
6312       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6313         for (const AccessInfo &AI : *Accesses)
6314           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6315             return false;
6316     }
6317 
6318     return true;
6319   }
6320 
6321   ChangeStatus indicatePessimisticFixpoint() override {
6322     // If we give up and indicate a pessimistic fixpoint this instruction will
6323     // become an access for all potential access kinds:
6324     // TODO: Add pointers for argmemonly and globals to improve the results of
6325     //       checkForAllAccessesToMemoryKind.
6326     bool Changed = false;
6327     MemoryLocationsKind KnownMLK = getKnown();
6328     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6329     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6330       if (!(CurMLK & KnownMLK))
6331         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6332                                   getAccessKindFromInst(I));
6333     return AAMemoryLocation::indicatePessimisticFixpoint();
6334   }
6335 
6336 protected:
6337   /// Helper struct to tie together an instruction that has a read or write
6338   /// effect with the pointer it accesses (if any).
6339   struct AccessInfo {
6340 
6341     /// The instruction that caused the access.
6342     const Instruction *I;
6343 
6344     /// The base pointer that is accessed, or null if unknown.
6345     const Value *Ptr;
6346 
6347     /// The kind of access (read/write/read+write).
6348     AccessKind Kind;
6349 
6350     bool operator==(const AccessInfo &RHS) const {
6351       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6352     }
6353     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6354       if (LHS.I != RHS.I)
6355         return LHS.I < RHS.I;
6356       if (LHS.Ptr != RHS.Ptr)
6357         return LHS.Ptr < RHS.Ptr;
6358       if (LHS.Kind != RHS.Kind)
6359         return LHS.Kind < RHS.Kind;
6360       return false;
6361     }
6362   };
6363 
6364   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6365   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6366   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6367   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6368 
6369   /// Return the kind(s) of location that may be accessed by \p V.
6370   AAMemoryLocation::MemoryLocationsKind
6371   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6372 
6373   /// Return the access kind as determined by \p I.
6374   AccessKind getAccessKindFromInst(const Instruction *I) {
6375     AccessKind AK = READ_WRITE;
6376     if (I) {
6377       AK = I->mayReadFromMemory() ? READ : NONE;
6378       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6379     }
6380     return AK;
6381   }
6382 
6383   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6384   /// an access of kind \p AK to a \p MLK memory location with the access
6385   /// pointer \p Ptr.
6386   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6387                                  MemoryLocationsKind MLK, const Instruction *I,
6388                                  const Value *Ptr, bool &Changed,
6389                                  AccessKind AK = READ_WRITE) {
6390 
6391     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6392     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6393     if (!Accesses)
6394       Accesses = new (Allocator) AccessSet();
6395     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6396     State.removeAssumedBits(MLK);
6397   }
6398 
6399   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6400   /// arguments, and update the state and access map accordingly.
6401   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6402                           AAMemoryLocation::StateType &State, bool &Changed);
6403 
6404   /// Used to allocate access sets.
6405   BumpPtrAllocator &Allocator;
6406 
6407   /// The set of IR attributes AAMemoryLocation deals with.
6408   static const Attribute::AttrKind AttrKinds[4];
6409 };
6410 
6411 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6412     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6413     Attribute::InaccessibleMemOrArgMemOnly};
6414 
6415 void AAMemoryLocationImpl::categorizePtrValue(
6416     Attributor &A, const Instruction &I, const Value &Ptr,
6417     AAMemoryLocation::StateType &State, bool &Changed) {
6418   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6419                     << Ptr << " ["
6420                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6421 
6422   auto StripGEPCB = [](Value *V) -> Value * {
6423     auto *GEP = dyn_cast<GEPOperator>(V);
6424     while (GEP) {
6425       V = GEP->getPointerOperand();
6426       GEP = dyn_cast<GEPOperator>(V);
6427     }
6428     return V;
6429   };
6430 
6431   auto VisitValueCB = [&](Value &V, const Instruction *,
6432                           AAMemoryLocation::StateType &T,
6433                           bool Stripped) -> bool {
6434     MemoryLocationsKind MLK = NO_LOCATIONS;
6435     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6436     if (isa<UndefValue>(V))
6437       return true;
6438     if (auto *Arg = dyn_cast<Argument>(&V)) {
6439       if (Arg->hasByValAttr())
6440         MLK = NO_LOCAL_MEM;
6441       else
6442         MLK = NO_ARGUMENT_MEM;
6443     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6444       if (GV->hasLocalLinkage())
6445         MLK = NO_GLOBAL_INTERNAL_MEM;
6446       else
6447         MLK = NO_GLOBAL_EXTERNAL_MEM;
6448     } else if (isa<ConstantPointerNull>(V) &&
6449                !NullPointerIsDefined(getAssociatedFunction(),
6450                                      V.getType()->getPointerAddressSpace())) {
6451       return true;
6452     } else if (isa<AllocaInst>(V)) {
6453       MLK = NO_LOCAL_MEM;
6454     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6455       const auto &NoAliasAA =
6456           A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
6457       if (NoAliasAA.isAssumedNoAlias())
6458         MLK = NO_MALLOCED_MEM;
6459       else
6460         MLK = NO_UNKOWN_MEM;
6461     } else {
6462       MLK = NO_UNKOWN_MEM;
6463     }
6464 
6465     assert(MLK != NO_LOCATIONS && "No location specified!");
6466     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6467                               getAccessKindFromInst(&I));
6468     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6469                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6470                       << "\n");
6471     return true;
6472   };
6473 
6474   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6475           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6476           /* UseValueSimplify */ true,
6477           /* MaxValues */ 32, StripGEPCB)) {
6478     LLVM_DEBUG(
6479         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6480     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6481                               getAccessKindFromInst(&I));
6482   } else {
6483     LLVM_DEBUG(
6484         dbgs()
6485         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6486         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6487   }
6488 }
6489 
6490 AAMemoryLocation::MemoryLocationsKind
6491 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6492                                                   bool &Changed) {
6493   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6494                     << I << "\n");
6495 
6496   AAMemoryLocation::StateType AccessedLocs;
6497   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6498 
6499   if (auto *CB = dyn_cast<CallBase>(&I)) {
6500 
6501     // First check if we assume any memory is access is visible.
6502     const auto &CBMemLocationAA =
6503         A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
6504     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6505                       << " [" << CBMemLocationAA << "]\n");
6506 
6507     if (CBMemLocationAA.isAssumedReadNone())
6508       return NO_LOCATIONS;
6509 
6510     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6511       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6512                                 Changed, getAccessKindFromInst(&I));
6513       return AccessedLocs.getAssumed();
6514     }
6515 
6516     uint32_t CBAssumedNotAccessedLocs =
6517         CBMemLocationAA.getAssumedNotAccessedLocation();
6518 
6519     // Set the argmemonly and global bit as we handle them separately below.
6520     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6521         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6522 
6523     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6524       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6525         continue;
6526       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6527                                 getAccessKindFromInst(&I));
6528     }
6529 
6530     // Now handle global memory if it might be accessed. This is slightly tricky
6531     // as NO_GLOBAL_MEM has multiple bits set.
6532     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6533     if (HasGlobalAccesses) {
6534       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6535                             AccessKind Kind, MemoryLocationsKind MLK) {
6536         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6537                                   getAccessKindFromInst(&I));
6538         return true;
6539       };
6540       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6541               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6542         return AccessedLocs.getWorstState();
6543     }
6544 
6545     LLVM_DEBUG(
6546         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6547                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6548 
6549     // Now handle argument memory if it might be accessed.
6550     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6551     if (HasArgAccesses) {
6552       for (unsigned ArgNo = 0, E = CB->getNumArgOperands(); ArgNo < E;
6553            ++ArgNo) {
6554 
6555         // Skip non-pointer arguments.
6556         const Value *ArgOp = CB->getArgOperand(ArgNo);
6557         if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6558           continue;
6559 
6560         // Skip readnone arguments.
6561         const IRPosition &ArgOpIRP = IRPosition::callsite_argument(*CB, ArgNo);
6562         const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
6563             *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
6564 
6565         if (ArgOpMemLocationAA.isAssumedReadNone())
6566           continue;
6567 
6568         // Categorize potentially accessed pointer arguments as if there was an
6569         // access instruction with them as pointer.
6570         categorizePtrValue(A, I, *ArgOp, AccessedLocs, Changed);
6571       }
6572     }
6573 
6574     LLVM_DEBUG(
6575         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6576                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6577 
6578     return AccessedLocs.getAssumed();
6579   }
6580 
6581   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6582     LLVM_DEBUG(
6583         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6584                << I << " [" << *Ptr << "]\n");
6585     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6586     return AccessedLocs.getAssumed();
6587   }
6588 
6589   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6590                     << I << "\n");
6591   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6592                             getAccessKindFromInst(&I));
6593   return AccessedLocs.getAssumed();
6594 }
6595 
6596 /// An AA to represent the memory behavior function attributes.
6597 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6598   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6599       : AAMemoryLocationImpl(IRP, A) {}
6600 
6601   /// See AbstractAttribute::updateImpl(Attributor &A).
6602   virtual ChangeStatus updateImpl(Attributor &A) override {
6603 
6604     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6605         *this, getIRPosition(), /* TrackDependence */ false);
6606     if (MemBehaviorAA.isAssumedReadNone()) {
6607       if (MemBehaviorAA.isKnownReadNone())
6608         return indicateOptimisticFixpoint();
6609       assert(isAssumedReadNone() &&
6610              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6611       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6612       return ChangeStatus::UNCHANGED;
6613     }
6614 
6615     // The current assumed state used to determine a change.
6616     auto AssumedState = getAssumed();
6617     bool Changed = false;
6618 
6619     auto CheckRWInst = [&](Instruction &I) {
6620       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6621       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6622                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6623       removeAssumedBits(inverseLocation(MLK, false, false));
6624       return true;
6625     };
6626 
6627     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6628       return indicatePessimisticFixpoint();
6629 
6630     Changed |= AssumedState != getAssumed();
6631     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6632   }
6633 
6634   /// See AbstractAttribute::trackStatistics()
6635   void trackStatistics() const override {
6636     if (isAssumedReadNone())
6637       STATS_DECLTRACK_FN_ATTR(readnone)
6638     else if (isAssumedArgMemOnly())
6639       STATS_DECLTRACK_FN_ATTR(argmemonly)
6640     else if (isAssumedInaccessibleMemOnly())
6641       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6642     else if (isAssumedInaccessibleOrArgMemOnly())
6643       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6644   }
6645 };
6646 
6647 /// AAMemoryLocation attribute for call sites.
6648 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6649   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6650       : AAMemoryLocationImpl(IRP, A) {}
6651 
6652   /// See AbstractAttribute::initialize(...).
6653   void initialize(Attributor &A) override {
6654     AAMemoryLocationImpl::initialize(A);
6655     Function *F = getAssociatedFunction();
6656     if (!F || !A.isFunctionIPOAmendable(*F)) {
6657       indicatePessimisticFixpoint();
6658       return;
6659     }
6660   }
6661 
6662   /// See AbstractAttribute::updateImpl(...).
6663   ChangeStatus updateImpl(Attributor &A) override {
6664     // TODO: Once we have call site specific value information we can provide
6665     //       call site specific liveness liveness information and then it makes
6666     //       sense to specialize attributes for call sites arguments instead of
6667     //       redirecting requests to the callee argument.
6668     Function *F = getAssociatedFunction();
6669     const IRPosition &FnPos = IRPosition::function(*F);
6670     auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
6671     bool Changed = false;
6672     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6673                           AccessKind Kind, MemoryLocationsKind MLK) {
6674       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6675                                 getAccessKindFromInst(I));
6676       return true;
6677     };
6678     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6679       return indicatePessimisticFixpoint();
6680     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6681   }
6682 
6683   /// See AbstractAttribute::trackStatistics()
6684   void trackStatistics() const override {
6685     if (isAssumedReadNone())
6686       STATS_DECLTRACK_CS_ATTR(readnone)
6687   }
6688 };
6689 
6690 /// ------------------ Value Constant Range Attribute -------------------------
6691 
6692 struct AAValueConstantRangeImpl : AAValueConstantRange {
6693   using StateType = IntegerRangeState;
6694   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6695       : AAValueConstantRange(IRP, A) {}
6696 
6697   /// See AbstractAttribute::getAsStr().
6698   const std::string getAsStr() const override {
6699     std::string Str;
6700     llvm::raw_string_ostream OS(Str);
6701     OS << "range(" << getBitWidth() << ")<";
6702     getKnown().print(OS);
6703     OS << " / ";
6704     getAssumed().print(OS);
6705     OS << ">";
6706     return OS.str();
6707   }
6708 
6709   /// Helper function to get a SCEV expr for the associated value at program
6710   /// point \p I.
6711   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6712     if (!getAnchorScope())
6713       return nullptr;
6714 
6715     ScalarEvolution *SE =
6716         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6717             *getAnchorScope());
6718 
6719     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6720         *getAnchorScope());
6721 
6722     if (!SE || !LI)
6723       return nullptr;
6724 
6725     const SCEV *S = SE->getSCEV(&getAssociatedValue());
6726     if (!I)
6727       return S;
6728 
6729     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
6730   }
6731 
6732   /// Helper function to get a range from SCEV for the associated value at
6733   /// program point \p I.
6734   ConstantRange getConstantRangeFromSCEV(Attributor &A,
6735                                          const Instruction *I = nullptr) const {
6736     if (!getAnchorScope())
6737       return getWorstState(getBitWidth());
6738 
6739     ScalarEvolution *SE =
6740         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6741             *getAnchorScope());
6742 
6743     const SCEV *S = getSCEV(A, I);
6744     if (!SE || !S)
6745       return getWorstState(getBitWidth());
6746 
6747     return SE->getUnsignedRange(S);
6748   }
6749 
6750   /// Helper function to get a range from LVI for the associated value at
6751   /// program point \p I.
6752   ConstantRange
6753   getConstantRangeFromLVI(Attributor &A,
6754                           const Instruction *CtxI = nullptr) const {
6755     if (!getAnchorScope())
6756       return getWorstState(getBitWidth());
6757 
6758     LazyValueInfo *LVI =
6759         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
6760             *getAnchorScope());
6761 
6762     if (!LVI || !CtxI)
6763       return getWorstState(getBitWidth());
6764     return LVI->getConstantRange(&getAssociatedValue(),
6765                                  const_cast<BasicBlock *>(CtxI->getParent()),
6766                                  const_cast<Instruction *>(CtxI));
6767   }
6768 
6769   /// See AAValueConstantRange::getKnownConstantRange(..).
6770   ConstantRange
6771   getKnownConstantRange(Attributor &A,
6772                         const Instruction *CtxI = nullptr) const override {
6773     if (!CtxI || CtxI == getCtxI())
6774       return getKnown();
6775 
6776     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6777     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6778     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
6779   }
6780 
6781   /// See AAValueConstantRange::getAssumedConstantRange(..).
6782   ConstantRange
6783   getAssumedConstantRange(Attributor &A,
6784                           const Instruction *CtxI = nullptr) const override {
6785     // TODO: Make SCEV use Attributor assumption.
6786     //       We may be able to bound a variable range via assumptions in
6787     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
6788     //       evolve to x^2 + x, then we can say that y is in [2, 12].
6789 
6790     if (!CtxI || CtxI == getCtxI())
6791       return getAssumed();
6792 
6793     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
6794     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
6795     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
6796   }
6797 
6798   /// See AbstractAttribute::initialize(..).
6799   void initialize(Attributor &A) override {
6800     // Intersect a range given by SCEV.
6801     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
6802 
6803     // Intersect a range given by LVI.
6804     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
6805   }
6806 
6807   /// Helper function to create MDNode for range metadata.
6808   static MDNode *
6809   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
6810                             const ConstantRange &AssumedConstantRange) {
6811     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
6812                                   Ty, AssumedConstantRange.getLower())),
6813                               ConstantAsMetadata::get(ConstantInt::get(
6814                                   Ty, AssumedConstantRange.getUpper()))};
6815     return MDNode::get(Ctx, LowAndHigh);
6816   }
6817 
6818   /// Return true if \p Assumed is included in \p KnownRanges.
6819   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
6820 
6821     if (Assumed.isFullSet())
6822       return false;
6823 
6824     if (!KnownRanges)
6825       return true;
6826 
6827     // If multiple ranges are annotated in IR, we give up to annotate assumed
6828     // range for now.
6829 
6830     // TODO:  If there exists a known range which containts assumed range, we
6831     // can say assumed range is better.
6832     if (KnownRanges->getNumOperands() > 2)
6833       return false;
6834 
6835     ConstantInt *Lower =
6836         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
6837     ConstantInt *Upper =
6838         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
6839 
6840     ConstantRange Known(Lower->getValue(), Upper->getValue());
6841     return Known.contains(Assumed) && Known != Assumed;
6842   }
6843 
6844   /// Helper function to set range metadata.
6845   static bool
6846   setRangeMetadataIfisBetterRange(Instruction *I,
6847                                   const ConstantRange &AssumedConstantRange) {
6848     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
6849     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
6850       if (!AssumedConstantRange.isEmptySet()) {
6851         I->setMetadata(LLVMContext::MD_range,
6852                        getMDNodeForConstantRange(I->getType(), I->getContext(),
6853                                                  AssumedConstantRange));
6854         return true;
6855       }
6856     }
6857     return false;
6858   }
6859 
6860   /// See AbstractAttribute::manifest()
6861   ChangeStatus manifest(Attributor &A) override {
6862     ChangeStatus Changed = ChangeStatus::UNCHANGED;
6863     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
6864     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
6865 
6866     auto &V = getAssociatedValue();
6867     if (!AssumedConstantRange.isEmptySet() &&
6868         !AssumedConstantRange.isSingleElement()) {
6869       if (Instruction *I = dyn_cast<Instruction>(&V))
6870         if (isa<CallInst>(I) || isa<LoadInst>(I))
6871           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
6872             Changed = ChangeStatus::CHANGED;
6873     }
6874 
6875     return Changed;
6876   }
6877 };
6878 
6879 struct AAValueConstantRangeArgument final
6880     : AAArgumentFromCallSiteArguments<
6881           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
6882   using Base = AAArgumentFromCallSiteArguments<
6883       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
6884   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
6885       : Base(IRP, A) {}
6886 
6887   /// See AbstractAttribute::initialize(..).
6888   void initialize(Attributor &A) override {
6889     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
6890       indicatePessimisticFixpoint();
6891     } else {
6892       Base::initialize(A);
6893     }
6894   }
6895 
6896   /// See AbstractAttribute::trackStatistics()
6897   void trackStatistics() const override {
6898     STATS_DECLTRACK_ARG_ATTR(value_range)
6899   }
6900 };
6901 
6902 struct AAValueConstantRangeReturned
6903     : AAReturnedFromReturnedValues<AAValueConstantRange,
6904                                    AAValueConstantRangeImpl> {
6905   using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
6906                                             AAValueConstantRangeImpl>;
6907   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
6908       : Base(IRP, A) {}
6909 
6910   /// See AbstractAttribute::initialize(...).
6911   void initialize(Attributor &A) override {}
6912 
6913   /// See AbstractAttribute::trackStatistics()
6914   void trackStatistics() const override {
6915     STATS_DECLTRACK_FNRET_ATTR(value_range)
6916   }
6917 };
6918 
6919 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
6920   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
6921       : AAValueConstantRangeImpl(IRP, A) {}
6922 
6923   /// See AbstractAttribute::initialize(...).
6924   void initialize(Attributor &A) override {
6925     AAValueConstantRangeImpl::initialize(A);
6926     Value &V = getAssociatedValue();
6927 
6928     if (auto *C = dyn_cast<ConstantInt>(&V)) {
6929       unionAssumed(ConstantRange(C->getValue()));
6930       indicateOptimisticFixpoint();
6931       return;
6932     }
6933 
6934     if (isa<UndefValue>(&V)) {
6935       // Collapse the undef state to 0.
6936       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
6937       indicateOptimisticFixpoint();
6938       return;
6939     }
6940 
6941     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
6942       return;
6943     // If it is a load instruction with range metadata, use it.
6944     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
6945       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
6946         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
6947         return;
6948       }
6949 
6950     // We can work with PHI and select instruction as we traverse their operands
6951     // during update.
6952     if (isa<SelectInst>(V) || isa<PHINode>(V))
6953       return;
6954 
6955     // Otherwise we give up.
6956     indicatePessimisticFixpoint();
6957 
6958     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
6959                       << getAssociatedValue() << "\n");
6960   }
6961 
6962   bool calculateBinaryOperator(
6963       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
6964       const Instruction *CtxI,
6965       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6966     Value *LHS = BinOp->getOperand(0);
6967     Value *RHS = BinOp->getOperand(1);
6968     // TODO: Allow non integers as well.
6969     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
6970       return false;
6971 
6972     auto &LHSAA =
6973         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
6974     QuerriedAAs.push_back(&LHSAA);
6975     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
6976 
6977     auto &RHSAA =
6978         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
6979     QuerriedAAs.push_back(&RHSAA);
6980     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
6981 
6982     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
6983 
6984     T.unionAssumed(AssumedRange);
6985 
6986     // TODO: Track a known state too.
6987 
6988     return T.isValidState();
6989   }
6990 
6991   bool calculateCastInst(
6992       Attributor &A, CastInst *CastI, IntegerRangeState &T,
6993       const Instruction *CtxI,
6994       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
6995     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
6996     // TODO: Allow non integers as well.
6997     Value &OpV = *CastI->getOperand(0);
6998     if (!OpV.getType()->isIntegerTy())
6999       return false;
7000 
7001     auto &OpAA =
7002         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
7003     QuerriedAAs.push_back(&OpAA);
7004     T.unionAssumed(
7005         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7006     return T.isValidState();
7007   }
7008 
7009   bool
7010   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7011                    const Instruction *CtxI,
7012                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7013     Value *LHS = CmpI->getOperand(0);
7014     Value *RHS = CmpI->getOperand(1);
7015     // TODO: Allow non integers as well.
7016     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7017       return false;
7018 
7019     auto &LHSAA =
7020         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
7021     QuerriedAAs.push_back(&LHSAA);
7022     auto &RHSAA =
7023         A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
7024     QuerriedAAs.push_back(&RHSAA);
7025 
7026     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7027     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7028 
7029     // If one of them is empty set, we can't decide.
7030     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7031       return true;
7032 
7033     bool MustTrue = false, MustFalse = false;
7034 
7035     auto AllowedRegion =
7036         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7037 
7038     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
7039         CmpI->getPredicate(), RHSAARange);
7040 
7041     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7042       MustFalse = true;
7043 
7044     if (SatisfyingRegion.contains(LHSAARange))
7045       MustTrue = true;
7046 
7047     assert((!MustTrue || !MustFalse) &&
7048            "Either MustTrue or MustFalse should be false!");
7049 
7050     if (MustTrue)
7051       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7052     else if (MustFalse)
7053       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7054     else
7055       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7056 
7057     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7058                       << " " << RHSAA << "\n");
7059 
7060     // TODO: Track a known state too.
7061     return T.isValidState();
7062   }
7063 
7064   /// See AbstractAttribute::updateImpl(...).
7065   ChangeStatus updateImpl(Attributor &A) override {
7066     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7067                             IntegerRangeState &T, bool Stripped) -> bool {
7068       Instruction *I = dyn_cast<Instruction>(&V);
7069       if (!I || isa<CallBase>(I)) {
7070 
7071         // If the value is not instruction, we query AA to Attributor.
7072         const auto &AA =
7073             A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));
7074 
7075         // Clamp operator is not used to utilize a program point CtxI.
7076         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7077 
7078         return T.isValidState();
7079       }
7080 
7081       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7082       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7083         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7084           return false;
7085       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7086         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7087           return false;
7088       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7089         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7090           return false;
7091       } else {
7092         // Give up with other instructions.
7093         // TODO: Add other instructions
7094 
7095         T.indicatePessimisticFixpoint();
7096         return false;
7097       }
7098 
7099       // Catch circular reasoning in a pessimistic way for now.
7100       // TODO: Check how the range evolves and if we stripped anything, see also
7101       //       AADereferenceable or AAAlign for similar situations.
7102       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7103         if (QueriedAA != this)
7104           continue;
7105         // If we are in a stady state we do not need to worry.
7106         if (T.getAssumed() == getState().getAssumed())
7107           continue;
7108         T.indicatePessimisticFixpoint();
7109       }
7110 
7111       return T.isValidState();
7112     };
7113 
7114     IntegerRangeState T(getBitWidth());
7115 
7116     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7117             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7118             /* UseValueSimplify */ false))
7119       return indicatePessimisticFixpoint();
7120 
7121     return clampStateAndIndicateChange(getState(), T);
7122   }
7123 
7124   /// See AbstractAttribute::trackStatistics()
7125   void trackStatistics() const override {
7126     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7127   }
7128 };
7129 
7130 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7131   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7132       : AAValueConstantRangeImpl(IRP, A) {}
7133 
7134   /// See AbstractAttribute::initialize(...).
7135   ChangeStatus updateImpl(Attributor &A) override {
7136     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7137                      "not be called");
7138   }
7139 
7140   /// See AbstractAttribute::trackStatistics()
7141   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7142 };
7143 
7144 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7145   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7146       : AAValueConstantRangeFunction(IRP, A) {}
7147 
7148   /// See AbstractAttribute::trackStatistics()
7149   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7150 };
7151 
7152 struct AAValueConstantRangeCallSiteReturned
7153     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7154                                      AAValueConstantRangeImpl> {
7155   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7156       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7157                                        AAValueConstantRangeImpl>(IRP, A) {}
7158 
7159   /// See AbstractAttribute::initialize(...).
7160   void initialize(Attributor &A) override {
7161     // If it is a load instruction with range metadata, use the metadata.
7162     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7163       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7164         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7165 
7166     AAValueConstantRangeImpl::initialize(A);
7167   }
7168 
7169   /// See AbstractAttribute::trackStatistics()
7170   void trackStatistics() const override {
7171     STATS_DECLTRACK_CSRET_ATTR(value_range)
7172   }
7173 };
7174 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7175   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7176       : AAValueConstantRangeFloating(IRP, A) {}
7177 
7178   /// See AbstractAttribute::trackStatistics()
7179   void trackStatistics() const override {
7180     STATS_DECLTRACK_CSARG_ATTR(value_range)
7181   }
7182 };
7183 
7184 /// ------------------ Potential Values Attribute -------------------------
7185 
7186 struct AAPotentialValuesImpl : AAPotentialValues {
7187   using StateType = PotentialConstantIntValuesState;
7188 
7189   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7190       : AAPotentialValues(IRP, A) {}
7191 
7192   /// See AbstractAttribute::getAsStr().
7193   const std::string getAsStr() const override {
7194     std::string Str;
7195     llvm::raw_string_ostream OS(Str);
7196     OS << getState();
7197     return OS.str();
7198   }
7199 
7200   /// See AbstractAttribute::updateImpl(...).
7201   ChangeStatus updateImpl(Attributor &A) override {
7202     return indicatePessimisticFixpoint();
7203   }
7204 };
7205 
7206 struct AAPotentialValuesArgument final
7207     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7208                                       PotentialConstantIntValuesState> {
7209   using Base =
7210       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7211                                       PotentialConstantIntValuesState>;
7212   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7213       : Base(IRP, A) {}
7214 
7215   /// See AbstractAttribute::initialize(..).
7216   void initialize(Attributor &A) override {
7217     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7218       indicatePessimisticFixpoint();
7219     } else {
7220       Base::initialize(A);
7221     }
7222   }
7223 
7224   /// See AbstractAttribute::trackStatistics()
7225   void trackStatistics() const override {
7226     STATS_DECLTRACK_ARG_ATTR(potential_values)
7227   }
7228 };
7229 
7230 struct AAPotentialValuesReturned
7231     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7232   using Base =
7233       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7234   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7235       : Base(IRP, A) {}
7236 
7237   /// See AbstractAttribute::trackStatistics()
7238   void trackStatistics() const override {
7239     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7240   }
7241 };
7242 
7243 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7244   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7245       : AAPotentialValuesImpl(IRP, A) {}
7246 
7247   /// See AbstractAttribute::initialize(..).
7248   void initialize(Attributor &A) override {
7249     Value &V = getAssociatedValue();
7250 
7251     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7252       unionAssumed(C->getValue());
7253       indicateOptimisticFixpoint();
7254       return;
7255     }
7256 
7257     if (isa<UndefValue>(&V)) {
7258       // Collapse the undef state to 0.
7259       unionAssumed(
7260           APInt(/* numBits */ getAssociatedType()->getIntegerBitWidth(),
7261                 /* val */ 0));
7262       indicateOptimisticFixpoint();
7263       return;
7264     }
7265 
7266     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7267       return;
7268 
7269     if (isa<SelectInst>(V) || isa<PHINode>(V))
7270       return;
7271 
7272     indicatePessimisticFixpoint();
7273 
7274     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7275                       << getAssociatedValue() << "\n");
7276   }
7277 
7278   /// See AbstractAttribute::trackStatistics()
7279   void trackStatistics() const override {
7280     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7281   }
7282 };
7283 
7284 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7285   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7286       : AAPotentialValuesImpl(IRP, A) {}
7287 
7288   /// See AbstractAttribute::initialize(...).
7289   ChangeStatus updateImpl(Attributor &A) override {
7290     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7291                      "not be called");
7292   }
7293 
7294   /// See AbstractAttribute::trackStatistics()
7295   void trackStatistics() const override {
7296     STATS_DECLTRACK_FN_ATTR(potential_values)
7297   }
7298 };
7299 
7300 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7301   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7302       : AAPotentialValuesFunction(IRP, A) {}
7303 
7304   /// See AbstractAttribute::trackStatistics()
7305   void trackStatistics() const override {
7306     STATS_DECLTRACK_CS_ATTR(potential_values)
7307   }
7308 };
7309 
7310 struct AAPotentialValuesCallSiteReturned
7311     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7312   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7313       : AACallSiteReturnedFromReturned<AAPotentialValues,
7314                                        AAPotentialValuesImpl>(IRP, A) {}
7315 
7316   /// See AbstractAttribute::trackStatistics()
7317   void trackStatistics() const override {
7318     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7319   }
7320 };
7321 
7322 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7323   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7324       : AAPotentialValuesFloating(IRP, A) {}
7325 
7326   /// See AbstractAttribute::trackStatistics()
7327   void trackStatistics() const override {
7328     STATS_DECLTRACK_CSARG_ATTR(potential_values)
7329   }
7330 };
7331 
7332 } // namespace
7333 
7334 const char AAReturnedValues::ID = 0;
7335 const char AANoUnwind::ID = 0;
7336 const char AANoSync::ID = 0;
7337 const char AANoFree::ID = 0;
7338 const char AANonNull::ID = 0;
7339 const char AANoRecurse::ID = 0;
7340 const char AAWillReturn::ID = 0;
7341 const char AAUndefinedBehavior::ID = 0;
7342 const char AANoAlias::ID = 0;
7343 const char AAReachability::ID = 0;
7344 const char AANoReturn::ID = 0;
7345 const char AAIsDead::ID = 0;
7346 const char AADereferenceable::ID = 0;
7347 const char AAAlign::ID = 0;
7348 const char AANoCapture::ID = 0;
7349 const char AAValueSimplify::ID = 0;
7350 const char AAHeapToStack::ID = 0;
7351 const char AAPrivatizablePtr::ID = 0;
7352 const char AAMemoryBehavior::ID = 0;
7353 const char AAMemoryLocation::ID = 0;
7354 const char AAValueConstantRange::ID = 0;
7355 const char AAPotentialValues::ID = 0;
7356 
7357 // Macro magic to create the static generator function for attributes that
7358 // follow the naming scheme.
7359 
7360 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
7361   case IRPosition::PK:                                                         \
7362     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
7363 
7364 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
7365   case IRPosition::PK:                                                         \
7366     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
7367     ++NumAAs;                                                                  \
7368     break;
7369 
7370 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
7371   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7372     CLASS *AA = nullptr;                                                       \
7373     switch (IRP.getPositionKind()) {                                           \
7374       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7375       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7376       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7377       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7378       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7379       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7380       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7381       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7382     }                                                                          \
7383     return *AA;                                                                \
7384   }
7385 
7386 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
7387   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7388     CLASS *AA = nullptr;                                                       \
7389     switch (IRP.getPositionKind()) {                                           \
7390       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7391       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
7392       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7393       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7394       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7395       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7396       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7397       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7398     }                                                                          \
7399     return *AA;                                                                \
7400   }
7401 
7402 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
7403   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7404     CLASS *AA = nullptr;                                                       \
7405     switch (IRP.getPositionKind()) {                                           \
7406       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7407       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7408       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7409       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7410       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7411       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
7412       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7413       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7414     }                                                                          \
7415     return *AA;                                                                \
7416   }
7417 
7418 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
7419   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7420     CLASS *AA = nullptr;                                                       \
7421     switch (IRP.getPositionKind()) {                                           \
7422       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7423       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
7424       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
7425       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7426       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
7427       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
7428       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
7429       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7430     }                                                                          \
7431     return *AA;                                                                \
7432   }
7433 
7434 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
7435   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
7436     CLASS *AA = nullptr;                                                       \
7437     switch (IRP.getPositionKind()) {                                           \
7438       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
7439       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
7440       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
7441       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
7442       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
7443       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
7444       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
7445       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
7446     }                                                                          \
7447     return *AA;                                                                \
7448   }
7449 
7450 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
7451 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
7452 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
7453 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
7454 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
7455 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
7456 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
7457 
7458 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
7459 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
7460 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
7461 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
7462 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
7463 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
7464 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
7465 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
7466 
7467 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
7468 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
7469 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
7470 
7471 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
7472 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
7473 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
7474 
7475 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
7476 
7477 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
7478 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
7479 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
7480 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
7481 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
7482 #undef SWITCH_PK_CREATE
7483 #undef SWITCH_PK_INV
7484