1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/SCCIterator.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/AssumeBundleQueries.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LazyValueInfo.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
28 #include "llvm/Analysis/ScalarEvolution.h"
29 #include "llvm/Analysis/TargetTransformInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/IR/Instruction.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/NoFolder.h"
37 #include "llvm/Support/Alignment.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/ErrorHandling.h"
41 #include "llvm/Support/FileSystem.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
44 #include "llvm/Transforms/Utils/Local.h"
45 #include <cassert>
46 
47 using namespace llvm;
48 
49 #define DEBUG_TYPE "attributor"
50 
51 static cl::opt<bool> ManifestInternal(
52     "attributor-manifest-internal", cl::Hidden,
53     cl::desc("Manifest Attributor internal string attributes."),
54     cl::init(false));
55 
56 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
57                                        cl::Hidden);
58 
59 template <>
60 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
61 
62 static cl::opt<unsigned, true> MaxPotentialValues(
63     "attributor-max-potential-values", cl::Hidden,
64     cl::desc("Maximum number of potential values to be "
65              "tracked for each position."),
66     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
67     cl::init(7));
68 
69 STATISTIC(NumAAs, "Number of abstract attributes created");
70 
71 // Some helper macros to deal with statistics tracking.
72 //
73 // Usage:
74 // For simple IR attribute tracking overload trackStatistics in the abstract
75 // attribute and choose the right STATS_DECLTRACK_********* macro,
76 // e.g.,:
77 //  void trackStatistics() const override {
78 //    STATS_DECLTRACK_ARG_ATTR(returned)
79 //  }
80 // If there is a single "increment" side one can use the macro
81 // STATS_DECLTRACK with a custom message. If there are multiple increment
82 // sides, STATS_DECL and STATS_TRACK can also be used separately.
83 //
84 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
85   ("Number of " #TYPE " marked '" #NAME "'")
86 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
87 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
88 #define STATS_DECL(NAME, TYPE, MSG)                                            \
89   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
90 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
91 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
92   {                                                                            \
93     STATS_DECL(NAME, TYPE, MSG)                                                \
94     STATS_TRACK(NAME, TYPE)                                                    \
95   }
96 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
97   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
98 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
99   STATS_DECLTRACK(NAME, CSArguments,                                           \
100                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
101 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
102   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
103 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
104   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
105 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
106   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
107                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
108 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
109   STATS_DECLTRACK(NAME, CSReturn,                                              \
110                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
111 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
112   STATS_DECLTRACK(NAME, Floating,                                              \
113                   ("Number of floating values known to be '" #NAME "'"))
114 
115 // Specialization of the operator<< for abstract attributes subclasses. This
116 // disambiguates situations where multiple operators are applicable.
117 namespace llvm {
118 #define PIPE_OPERATOR(CLASS)                                                   \
119   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
120     return OS << static_cast<const AbstractAttribute &>(AA);                   \
121   }
122 
123 PIPE_OPERATOR(AAIsDead)
124 PIPE_OPERATOR(AANoUnwind)
125 PIPE_OPERATOR(AANoSync)
126 PIPE_OPERATOR(AANoRecurse)
127 PIPE_OPERATOR(AAWillReturn)
128 PIPE_OPERATOR(AANoReturn)
129 PIPE_OPERATOR(AAReturnedValues)
130 PIPE_OPERATOR(AANonNull)
131 PIPE_OPERATOR(AANoAlias)
132 PIPE_OPERATOR(AADereferenceable)
133 PIPE_OPERATOR(AAAlign)
134 PIPE_OPERATOR(AANoCapture)
135 PIPE_OPERATOR(AAValueSimplify)
136 PIPE_OPERATOR(AANoFree)
137 PIPE_OPERATOR(AAHeapToStack)
138 PIPE_OPERATOR(AAReachability)
139 PIPE_OPERATOR(AAMemoryBehavior)
140 PIPE_OPERATOR(AAMemoryLocation)
141 PIPE_OPERATOR(AAValueConstantRange)
142 PIPE_OPERATOR(AAPrivatizablePtr)
143 PIPE_OPERATOR(AAUndefinedBehavior)
144 PIPE_OPERATOR(AAPotentialValues)
145 PIPE_OPERATOR(AANoUndef)
146 PIPE_OPERATOR(AACallEdges)
147 PIPE_OPERATOR(AAFunctionReachability)
148 PIPE_OPERATOR(AAPointerInfo)
149 
150 #undef PIPE_OPERATOR
151 
152 template <>
153 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
154                                                      const DerefState &R) {
155   ChangeStatus CS0 =
156       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
157   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
158   return CS0 | CS1;
159 }
160 
161 } // namespace llvm
162 
163 /// Get pointer operand of memory accessing instruction. If \p I is
164 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
165 /// is set to false and the instruction is volatile, return nullptr.
166 static const Value *getPointerOperand(const Instruction *I,
167                                       bool AllowVolatile) {
168   if (!AllowVolatile && I->isVolatile())
169     return nullptr;
170 
171   if (auto *LI = dyn_cast<LoadInst>(I)) {
172     return LI->getPointerOperand();
173   }
174 
175   if (auto *SI = dyn_cast<StoreInst>(I)) {
176     return SI->getPointerOperand();
177   }
178 
179   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
180     return CXI->getPointerOperand();
181   }
182 
183   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
184     return RMWI->getPointerOperand();
185   }
186 
187   return nullptr;
188 }
189 
190 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
191 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
192 /// getelement pointer instructions that traverse the natural type of \p Ptr if
193 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
194 /// through a cast to i8*.
195 ///
196 /// TODO: This could probably live somewhere more prominantly if it doesn't
197 ///       already exist.
198 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
199                                int64_t Offset, IRBuilder<NoFolder> &IRB,
200                                const DataLayout &DL) {
201   assert(Offset >= 0 && "Negative offset not supported yet!");
202   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
203                     << "-bytes as " << *ResTy << "\n");
204 
205   if (Offset) {
206     SmallVector<Value *, 4> Indices;
207     std::string GEPName = Ptr->getName().str() + ".0";
208 
209     // Add 0 index to look through the pointer.
210     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
211            "Offset out of bounds");
212     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
213 
214     Type *Ty = PtrElemTy;
215     do {
216       auto *STy = dyn_cast<StructType>(Ty);
217       if (!STy)
218         // Non-aggregate type, we cast and make byte-wise progress now.
219         break;
220 
221       const StructLayout *SL = DL.getStructLayout(STy);
222       if (int64_t(SL->getSizeInBytes()) < Offset)
223         break;
224 
225       uint64_t Idx = SL->getElementContainingOffset(Offset);
226       assert(Idx < STy->getNumElements() && "Offset calculation error!");
227       uint64_t Rem = Offset - SL->getElementOffset(Idx);
228       Ty = STy->getElementType(Idx);
229 
230       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
231                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
232 
233       GEPName += "." + std::to_string(Idx);
234       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
235       Offset = Rem;
236     } while (Offset);
237 
238     // Create a GEP for the indices collected above.
239     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
240 
241     // If an offset is left we use byte-wise adjustment.
242     if (Offset) {
243       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
244       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
245                           GEPName + ".b" + Twine(Offset));
246     }
247   }
248 
249   // Ensure the result has the requested type.
250   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
251 
252   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
253   return Ptr;
254 }
255 
256 /// Recursively visit all values that might become \p IRP at some point. This
257 /// will be done by looking through cast instructions, selects, phis, and calls
258 /// with the "returned" attribute. Once we cannot look through the value any
259 /// further, the callback \p VisitValueCB is invoked and passed the current
260 /// value, the \p State, and a flag to indicate if we stripped anything.
261 /// Stripped means that we unpacked the value associated with \p IRP at least
262 /// once. Note that the value used for the callback may still be the value
263 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
264 /// we will never visit more values than specified by \p MaxValues.
265 template <typename StateTy>
266 static bool genericValueTraversal(
267     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
268     StateTy &State,
269     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
270         VisitValueCB,
271     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
272     function_ref<Value *(Value *)> StripCB = nullptr) {
273 
274   const AAIsDead *LivenessAA = nullptr;
275   if (IRP.getAnchorScope())
276     LivenessAA = &A.getAAFor<AAIsDead>(
277         QueryingAA,
278         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
279         DepClassTy::NONE);
280   bool AnyDead = false;
281 
282   Value *InitialV = &IRP.getAssociatedValue();
283   using Item = std::pair<Value *, const Instruction *>;
284   SmallSet<Item, 16> Visited;
285   SmallVector<Item, 16> Worklist;
286   Worklist.push_back({InitialV, CtxI});
287 
288   int Iteration = 0;
289   do {
290     Item I = Worklist.pop_back_val();
291     Value *V = I.first;
292     CtxI = I.second;
293     if (StripCB)
294       V = StripCB(V);
295 
296     // Check if we should process the current value. To prevent endless
297     // recursion keep a record of the values we followed!
298     if (!Visited.insert(I).second)
299       continue;
300 
301     // Make sure we limit the compile time for complex expressions.
302     if (Iteration++ >= MaxValues)
303       return false;
304 
305     // Explicitly look through calls with a "returned" attribute if we do
306     // not have a pointer as stripPointerCasts only works on them.
307     Value *NewV = nullptr;
308     if (V->getType()->isPointerTy()) {
309       NewV = V->stripPointerCasts();
310     } else {
311       auto *CB = dyn_cast<CallBase>(V);
312       if (CB && CB->getCalledFunction()) {
313         for (Argument &Arg : CB->getCalledFunction()->args())
314           if (Arg.hasReturnedAttr()) {
315             NewV = CB->getArgOperand(Arg.getArgNo());
316             break;
317           }
318       }
319     }
320     if (NewV && NewV != V) {
321       Worklist.push_back({NewV, CtxI});
322       continue;
323     }
324 
325     // Look through select instructions, visit assumed potential values.
326     if (auto *SI = dyn_cast<SelectInst>(V)) {
327       bool UsedAssumedInformation = false;
328       Optional<Constant *> C = A.getAssumedConstant(
329           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
330       bool NoValueYet = !C.hasValue();
331       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
332         continue;
333       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
334         if (CI->isZero())
335           Worklist.push_back({SI->getFalseValue(), CtxI});
336         else
337           Worklist.push_back({SI->getTrueValue(), CtxI});
338         continue;
339       }
340       // We could not simplify the condition, assume both values.(
341       Worklist.push_back({SI->getTrueValue(), CtxI});
342       Worklist.push_back({SI->getFalseValue(), CtxI});
343       continue;
344     }
345 
346     // Look through phi nodes, visit all live operands.
347     if (auto *PHI = dyn_cast<PHINode>(V)) {
348       assert(LivenessAA &&
349              "Expected liveness in the presence of instructions!");
350       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
351         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
352         bool UsedAssumedInformation = false;
353         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
354                             LivenessAA, UsedAssumedInformation,
355                             /* CheckBBLivenessOnly */ true)) {
356           AnyDead = true;
357           continue;
358         }
359         Worklist.push_back(
360             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
361       }
362       continue;
363     }
364 
365     if (UseValueSimplify && !isa<Constant>(V)) {
366       bool UsedAssumedInformation = false;
367       Optional<Value *> SimpleV =
368           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
369       if (!SimpleV.hasValue())
370         continue;
371       if (!SimpleV.getValue())
372         return false;
373       Value *NewV = SimpleV.getValue();
374       if (NewV != V) {
375         Worklist.push_back({NewV, CtxI});
376         continue;
377       }
378     }
379 
380     // Once a leaf is reached we inform the user through the callback.
381     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
382       return false;
383   } while (!Worklist.empty());
384 
385   // If we actually used liveness information so we have to record a dependence.
386   if (AnyDead)
387     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
388 
389   // All values have been visited.
390   return true;
391 }
392 
393 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
394                                      SmallVectorImpl<Value *> &Objects,
395                                      const AbstractAttribute &QueryingAA,
396                                      const Instruction *CtxI) {
397   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
398   SmallPtrSet<Value *, 8> SeenObjects;
399   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
400                                      SmallVectorImpl<Value *> &Objects,
401                                      bool) -> bool {
402     if (SeenObjects.insert(&Val).second)
403       Objects.push_back(&Val);
404     return true;
405   };
406   if (!genericValueTraversal<decltype(Objects)>(
407           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
408           true, 32, StripCB))
409     return false;
410   return true;
411 }
412 
413 const Value *stripAndAccumulateMinimalOffsets(
414     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
415     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
416     bool UseAssumed = false) {
417 
418   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
419     const IRPosition &Pos = IRPosition::value(V);
420     // Only track dependence if we are going to use the assumed info.
421     const AAValueConstantRange &ValueConstantRangeAA =
422         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
423                                          UseAssumed ? DepClassTy::OPTIONAL
424                                                     : DepClassTy::NONE);
425     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
426                                      : ValueConstantRangeAA.getKnown();
427     // We can only use the lower part of the range because the upper part can
428     // be higher than what the value can really be.
429     ROffset = Range.getSignedMin();
430     return true;
431   };
432 
433   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
434                                                 AttributorAnalysis);
435 }
436 
437 static const Value *getMinimalBaseOfAccsesPointerOperand(
438     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
439     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
440   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
441   if (!Ptr)
442     return nullptr;
443   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
444   const Value *Base = stripAndAccumulateMinimalOffsets(
445       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
446 
447   BytesOffset = OffsetAPInt.getSExtValue();
448   return Base;
449 }
450 
451 static const Value *
452 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
453                                      const DataLayout &DL,
454                                      bool AllowNonInbounds = false) {
455   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
456   if (!Ptr)
457     return nullptr;
458 
459   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
460                                           AllowNonInbounds);
461 }
462 
463 /// Clamp the information known for all returned values of a function
464 /// (identified by \p QueryingAA) into \p S.
465 template <typename AAType, typename StateType = typename AAType::StateType>
466 static void clampReturnedValueStates(
467     Attributor &A, const AAType &QueryingAA, StateType &S,
468     const IRPosition::CallBaseContext *CBContext = nullptr) {
469   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
470                     << QueryingAA << " into " << S << "\n");
471 
472   assert((QueryingAA.getIRPosition().getPositionKind() ==
473               IRPosition::IRP_RETURNED ||
474           QueryingAA.getIRPosition().getPositionKind() ==
475               IRPosition::IRP_CALL_SITE_RETURNED) &&
476          "Can only clamp returned value states for a function returned or call "
477          "site returned position!");
478 
479   // Use an optional state as there might not be any return values and we want
480   // to join (IntegerState::operator&) the state of all there are.
481   Optional<StateType> T;
482 
483   // Callback for each possibly returned value.
484   auto CheckReturnValue = [&](Value &RV) -> bool {
485     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
486     const AAType &AA =
487         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
488     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
489                       << " @ " << RVPos << "\n");
490     const StateType &AAS = AA.getState();
491     if (T.hasValue())
492       *T &= AAS;
493     else
494       T = AAS;
495     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
496                       << "\n");
497     return T->isValidState();
498   };
499 
500   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
501     S.indicatePessimisticFixpoint();
502   else if (T.hasValue())
503     S ^= *T;
504 }
505 
506 /// Helper class for generic deduction: return value -> returned position.
507 template <typename AAType, typename BaseType,
508           typename StateType = typename BaseType::StateType,
509           bool PropagateCallBaseContext = false>
510 struct AAReturnedFromReturnedValues : public BaseType {
511   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
512       : BaseType(IRP, A) {}
513 
514   /// See AbstractAttribute::updateImpl(...).
515   ChangeStatus updateImpl(Attributor &A) override {
516     StateType S(StateType::getBestState(this->getState()));
517     clampReturnedValueStates<AAType, StateType>(
518         A, *this, S,
519         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
520     // TODO: If we know we visited all returned values, thus no are assumed
521     // dead, we can take the known information from the state T.
522     return clampStateAndIndicateChange<StateType>(this->getState(), S);
523   }
524 };
525 
526 /// Clamp the information known at all call sites for a given argument
527 /// (identified by \p QueryingAA) into \p S.
528 template <typename AAType, typename StateType = typename AAType::StateType>
529 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
530                                         StateType &S) {
531   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
532                     << QueryingAA << " into " << S << "\n");
533 
534   assert(QueryingAA.getIRPosition().getPositionKind() ==
535              IRPosition::IRP_ARGUMENT &&
536          "Can only clamp call site argument states for an argument position!");
537 
538   // Use an optional state as there might not be any return values and we want
539   // to join (IntegerState::operator&) the state of all there are.
540   Optional<StateType> T;
541 
542   // The argument number which is also the call site argument number.
543   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
544 
545   auto CallSiteCheck = [&](AbstractCallSite ACS) {
546     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
547     // Check if a coresponding argument was found or if it is on not associated
548     // (which can happen for callback calls).
549     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
550       return false;
551 
552     const AAType &AA =
553         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
554     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
555                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
556     const StateType &AAS = AA.getState();
557     if (T.hasValue())
558       *T &= AAS;
559     else
560       T = AAS;
561     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
562                       << "\n");
563     return T->isValidState();
564   };
565 
566   bool AllCallSitesKnown;
567   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
568                               AllCallSitesKnown))
569     S.indicatePessimisticFixpoint();
570   else if (T.hasValue())
571     S ^= *T;
572 }
573 
574 /// This function is the bridge between argument position and the call base
575 /// context.
576 template <typename AAType, typename BaseType,
577           typename StateType = typename AAType::StateType>
578 bool getArgumentStateFromCallBaseContext(Attributor &A,
579                                          BaseType &QueryingAttribute,
580                                          IRPosition &Pos, StateType &State) {
581   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
582          "Expected an 'argument' position !");
583   const CallBase *CBContext = Pos.getCallBaseContext();
584   if (!CBContext)
585     return false;
586 
587   int ArgNo = Pos.getCallSiteArgNo();
588   assert(ArgNo >= 0 && "Invalid Arg No!");
589 
590   const auto &AA = A.getAAFor<AAType>(
591       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
592       DepClassTy::REQUIRED);
593   const StateType &CBArgumentState =
594       static_cast<const StateType &>(AA.getState());
595 
596   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
597                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
598                     << "\n");
599 
600   // NOTE: If we want to do call site grouping it should happen here.
601   State ^= CBArgumentState;
602   return true;
603 }
604 
605 /// Helper class for generic deduction: call site argument -> argument position.
606 template <typename AAType, typename BaseType,
607           typename StateType = typename AAType::StateType,
608           bool BridgeCallBaseContext = false>
609 struct AAArgumentFromCallSiteArguments : public BaseType {
610   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
611       : BaseType(IRP, A) {}
612 
613   /// See AbstractAttribute::updateImpl(...).
614   ChangeStatus updateImpl(Attributor &A) override {
615     StateType S = StateType::getBestState(this->getState());
616 
617     if (BridgeCallBaseContext) {
618       bool Success =
619           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
620               A, *this, this->getIRPosition(), S);
621       if (Success)
622         return clampStateAndIndicateChange<StateType>(this->getState(), S);
623     }
624     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
625 
626     // TODO: If we know we visited all incoming values, thus no are assumed
627     // dead, we can take the known information from the state T.
628     return clampStateAndIndicateChange<StateType>(this->getState(), S);
629   }
630 };
631 
632 /// Helper class for generic replication: function returned -> cs returned.
633 template <typename AAType, typename BaseType,
634           typename StateType = typename BaseType::StateType,
635           bool IntroduceCallBaseContext = false>
636 struct AACallSiteReturnedFromReturned : public BaseType {
637   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
638       : BaseType(IRP, A) {}
639 
640   /// See AbstractAttribute::updateImpl(...).
641   ChangeStatus updateImpl(Attributor &A) override {
642     assert(this->getIRPosition().getPositionKind() ==
643                IRPosition::IRP_CALL_SITE_RETURNED &&
644            "Can only wrap function returned positions for call site returned "
645            "positions!");
646     auto &S = this->getState();
647 
648     const Function *AssociatedFunction =
649         this->getIRPosition().getAssociatedFunction();
650     if (!AssociatedFunction)
651       return S.indicatePessimisticFixpoint();
652 
653     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
654     if (IntroduceCallBaseContext)
655       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
656                         << CBContext << "\n");
657 
658     IRPosition FnPos = IRPosition::returned(
659         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
660     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
661     return clampStateAndIndicateChange(S, AA.getState());
662   }
663 };
664 
665 /// Helper function to accumulate uses.
666 template <class AAType, typename StateType = typename AAType::StateType>
667 static void followUsesInContext(AAType &AA, Attributor &A,
668                                 MustBeExecutedContextExplorer &Explorer,
669                                 const Instruction *CtxI,
670                                 SetVector<const Use *> &Uses,
671                                 StateType &State) {
672   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
673   for (unsigned u = 0; u < Uses.size(); ++u) {
674     const Use *U = Uses[u];
675     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
676       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
677       if (Found && AA.followUseInMBEC(A, U, UserI, State))
678         for (const Use &Us : UserI->uses())
679           Uses.insert(&Us);
680     }
681   }
682 }
683 
684 /// Use the must-be-executed-context around \p I to add information into \p S.
685 /// The AAType class is required to have `followUseInMBEC` method with the
686 /// following signature and behaviour:
687 ///
688 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
689 /// U - Underlying use.
690 /// I - The user of the \p U.
691 /// Returns true if the value should be tracked transitively.
692 ///
693 template <class AAType, typename StateType = typename AAType::StateType>
694 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
695                              Instruction &CtxI) {
696 
697   // Container for (transitive) uses of the associated value.
698   SetVector<const Use *> Uses;
699   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
700     Uses.insert(&U);
701 
702   MustBeExecutedContextExplorer &Explorer =
703       A.getInfoCache().getMustBeExecutedContextExplorer();
704 
705   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
706 
707   if (S.isAtFixpoint())
708     return;
709 
710   SmallVector<const BranchInst *, 4> BrInsts;
711   auto Pred = [&](const Instruction *I) {
712     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
713       if (Br->isConditional())
714         BrInsts.push_back(Br);
715     return true;
716   };
717 
718   // Here, accumulate conditional branch instructions in the context. We
719   // explore the child paths and collect the known states. The disjunction of
720   // those states can be merged to its own state. Let ParentState_i be a state
721   // to indicate the known information for an i-th branch instruction in the
722   // context. ChildStates are created for its successors respectively.
723   //
724   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
725   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
726   //      ...
727   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
728   //
729   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
730   //
731   // FIXME: Currently, recursive branches are not handled. For example, we
732   // can't deduce that ptr must be dereferenced in below function.
733   //
734   // void f(int a, int c, int *ptr) {
735   //    if(a)
736   //      if (b) {
737   //        *ptr = 0;
738   //      } else {
739   //        *ptr = 1;
740   //      }
741   //    else {
742   //      if (b) {
743   //        *ptr = 0;
744   //      } else {
745   //        *ptr = 1;
746   //      }
747   //    }
748   // }
749 
750   Explorer.checkForAllContext(&CtxI, Pred);
751   for (const BranchInst *Br : BrInsts) {
752     StateType ParentState;
753 
754     // The known state of the parent state is a conjunction of children's
755     // known states so it is initialized with a best state.
756     ParentState.indicateOptimisticFixpoint();
757 
758     for (const BasicBlock *BB : Br->successors()) {
759       StateType ChildState;
760 
761       size_t BeforeSize = Uses.size();
762       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
763 
764       // Erase uses which only appear in the child.
765       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
766         It = Uses.erase(It);
767 
768       ParentState &= ChildState;
769     }
770 
771     // Use only known state.
772     S += ParentState;
773   }
774 }
775 
776 /// ------------------------ PointerInfo ---------------------------------------
777 
778 namespace llvm {
779 namespace AA {
780 namespace PointerInfo {
781 
782 /// An access kind description as used by AAPointerInfo.
783 struct OffsetAndSize;
784 
785 struct State;
786 
787 } // namespace PointerInfo
788 } // namespace AA
789 
790 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
791 template <>
792 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
793   using Access = AAPointerInfo::Access;
794   static inline Access getEmptyKey();
795   static inline Access getTombstoneKey();
796   static unsigned getHashValue(const Access &A);
797   static bool isEqual(const Access &LHS, const Access &RHS);
798 };
799 
800 /// Helper that allows OffsetAndSize as a key in a DenseMap.
801 template <>
802 struct DenseMapInfo<AA::PointerInfo ::OffsetAndSize>
803     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
804 
805 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
806 /// but the instruction
807 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
808   using Base = DenseMapInfo<Instruction *>;
809   using Access = AAPointerInfo::Access;
810   static inline Access getEmptyKey();
811   static inline Access getTombstoneKey();
812   static unsigned getHashValue(const Access &A);
813   static bool isEqual(const Access &LHS, const Access &RHS);
814 };
815 
816 } // namespace llvm
817 
818 /// Helper to represent an access offset and size, with logic to deal with
819 /// uncertainty and check for overlapping accesses.
820 struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
821   using BaseTy = std::pair<int64_t, int64_t>;
822   OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
823   OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
824   int64_t getOffset() const { return first; }
825   int64_t getSize() const { return second; }
826   static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }
827 
828   /// Return true if this offset and size pair might describe an address that
829   /// overlaps with \p OAS.
830   bool mayOverlap(const OffsetAndSize &OAS) const {
831     // Any unknown value and we are giving up -> overlap.
832     if (OAS.getOffset() == OffsetAndSize::Unknown ||
833         OAS.getSize() == OffsetAndSize::Unknown ||
834         getOffset() == OffsetAndSize::Unknown ||
835         getSize() == OffsetAndSize::Unknown)
836       return true;
837 
838     // Check if one offset point is in the other interval [offset, offset+size].
839     return OAS.getOffset() + OAS.getSize() > getOffset() &&
840            OAS.getOffset() < getOffset() + getSize();
841   }
842 
843   /// Constant used to represent unknown offset or sizes.
844   static constexpr int64_t Unknown = 1 << 31;
845 };
846 
847 /// Implementation of the DenseMapInfo.
848 ///
849 ///{
850 inline llvm::AccessAsInstructionInfo::Access
851 llvm::AccessAsInstructionInfo::getEmptyKey() {
852   return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
853 }
854 inline llvm::AccessAsInstructionInfo::Access
855 llvm::AccessAsInstructionInfo::getTombstoneKey() {
856   return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
857                 nullptr);
858 }
859 unsigned llvm::AccessAsInstructionInfo::getHashValue(
860     const llvm::AccessAsInstructionInfo::Access &A) {
861   return Base::getHashValue(A.getRemoteInst());
862 }
863 bool llvm::AccessAsInstructionInfo::isEqual(
864     const llvm::AccessAsInstructionInfo::Access &LHS,
865     const llvm::AccessAsInstructionInfo::Access &RHS) {
866   return LHS.getRemoteInst() == RHS.getRemoteInst();
867 }
868 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
869 llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
870   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
871                                nullptr);
872 }
873 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
874 llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
875   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
876                                nullptr);
877 }
878 
879 unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
880     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
881   return detail::combineHashValue(
882              DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
883              (A.isWrittenValueYetUndetermined()
884                   ? ~0
885                   : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
886          A.getKind();
887 }
888 
889 bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
890     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
891     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
892   return LHS == RHS;
893 }
894 ///}
895 
896 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
897 struct AA::PointerInfo::State : public AbstractState {
898 
899   /// Return the best possible representable state.
900   static State getBestState(const State &SIS) { return State(); }
901 
902   /// Return the worst possible representable state.
903   static State getWorstState(const State &SIS) {
904     State R;
905     R.indicatePessimisticFixpoint();
906     return R;
907   }
908 
909   State() {}
910   State(const State &SIS) : AccessBins(SIS.AccessBins) {}
911   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}
912 
913   const State &getAssumed() const { return *this; }
914 
915   /// See AbstractState::isValidState().
916   bool isValidState() const override { return BS.isValidState(); }
917 
918   /// See AbstractState::isAtFixpoint().
919   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
920 
921   /// See AbstractState::indicateOptimisticFixpoint().
922   ChangeStatus indicateOptimisticFixpoint() override {
923     BS.indicateOptimisticFixpoint();
924     return ChangeStatus::UNCHANGED;
925   }
926 
927   /// See AbstractState::indicatePessimisticFixpoint().
928   ChangeStatus indicatePessimisticFixpoint() override {
929     BS.indicatePessimisticFixpoint();
930     return ChangeStatus::CHANGED;
931   }
932 
933   State &operator=(const State &R) {
934     if (this == &R)
935       return *this;
936     BS = R.BS;
937     AccessBins = R.AccessBins;
938     return *this;
939   }
940 
941   State &operator=(State &&R) {
942     if (this == &R)
943       return *this;
944     std::swap(BS, R.BS);
945     std::swap(AccessBins, R.AccessBins);
946     return *this;
947   }
948 
949   bool operator==(const State &R) const {
950     if (BS != R.BS)
951       return false;
952     if (AccessBins.size() != R.AccessBins.size())
953       return false;
954     auto It = begin(), RIt = R.begin(), E = end();
955     while (It != E) {
956       if (It->getFirst() != RIt->getFirst())
957         return false;
958       auto &Accs = It->getSecond();
959       auto &RAccs = RIt->getSecond();
960       if (Accs.size() != RAccs.size())
961         return false;
962       auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
963       while (AccIt != AccE) {
964         if (*AccIt != *RAccIt)
965           return false;
966         ++AccIt;
967         ++RAccIt;
968       }
969       ++It;
970       ++RIt;
971     }
972     return true;
973   }
974   bool operator!=(const State &R) const { return !(*this == R); }
975 
976   /// We store accesses in a set with the instruction as key.
977   using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;
978 
979   /// We store all accesses in bins denoted by their offset and size.
980   using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;
981 
982   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
983   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
984 
985 protected:
986   /// The bins with all the accesses for the associated pointer.
987   DenseMap<OffsetAndSize, Accesses> AccessBins;
988 
989   /// Add a new access to the state at offset \p Offset and with size \p Size.
990   /// The access is associated with \p I, writes \p Content (if anything), and
991   /// is of kind \p Kind.
992   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
993   ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
994                          Optional<Value *> Content,
995                          AAPointerInfo::AccessKind Kind, Type *Ty,
996                          Instruction *RemoteI = nullptr,
997                          Accesses *BinPtr = nullptr) {
998     OffsetAndSize Key{Offset, Size};
999     Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
1000     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
1001     // Check if we have an access for this instruction in this bin, if not,
1002     // simply add it.
1003     auto It = Bin.find(Acc);
1004     if (It == Bin.end()) {
1005       Bin.insert(Acc);
1006       return ChangeStatus::CHANGED;
1007     }
1008     // If the existing access is the same as then new one, nothing changed.
1009     AAPointerInfo::Access Before = *It;
1010     // The new one will be combined with the existing one.
1011     *It &= Acc;
1012     return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
1013   }
1014 
1015   /// See AAPointerInfo::forallInterferingAccesses.
1016   bool forallInterferingAccesses(
1017       Instruction &I,
1018       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
1019     if (!isValidState())
1020       return false;
1021     // First find the offset and size of I.
1022     OffsetAndSize OAS(-1, -1);
1023     for (auto &It : AccessBins) {
1024       for (auto &Access : It.getSecond()) {
1025         if (Access.getRemoteInst() == &I) {
1026           OAS = It.getFirst();
1027           break;
1028         }
1029       }
1030       if (OAS.getSize() != -1)
1031         break;
1032     }
1033     if (OAS.getSize() == -1)
1034       return true;
1035 
1036     // Now that we have an offset and size, find all overlapping ones and use
1037     // the callback on the accesses.
1038     for (auto &It : AccessBins) {
1039       OffsetAndSize ItOAS = It.getFirst();
1040       if (!OAS.mayOverlap(ItOAS))
1041         continue;
1042       for (auto &Access : It.getSecond())
1043         if (!CB(Access, OAS == ItOAS))
1044           return false;
1045     }
1046     return true;
1047   }
1048 
1049 private:
1050   /// State to track fixpoint and validity.
1051   BooleanState BS;
1052 };
1053 
1054 struct AAPointerInfoImpl
1055     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1056   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1057   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1058 
1059   /// See AbstractAttribute::initialize(...).
1060   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1061 
1062   /// See AbstractAttribute::getAsStr().
1063   const std::string getAsStr() const override {
1064     return std::string("PointerInfo ") +
1065            (isValidState() ? (std::string("#") +
1066                               std::to_string(AccessBins.size()) + " bins")
1067                            : "<invalid>");
1068   }
1069 
1070   /// See AbstractAttribute::manifest(...).
1071   ChangeStatus manifest(Attributor &A) override {
1072     return AAPointerInfo::manifest(A);
1073   }
1074 
1075   bool forallInterferingAccesses(
1076       LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1077       const override {
1078     return State::forallInterferingAccesses(LI, CB);
1079   }
1080   bool forallInterferingAccesses(
1081       StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1082       const override {
1083     return State::forallInterferingAccesses(SI, CB);
1084   }
1085 
1086   ChangeStatus translateAndAddCalleeState(Attributor &A,
1087                                           const AAPointerInfo &CalleeAA,
1088                                           int64_t CallArgOffset, CallBase &CB) {
1089     using namespace AA::PointerInfo;
1090     if (!CalleeAA.getState().isValidState() || !isValidState())
1091       return indicatePessimisticFixpoint();
1092 
1093     const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
1094     bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
1095 
1096     // Combine the accesses bin by bin.
1097     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1098     for (auto &It : CalleeImplAA.getState()) {
1099       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1100       if (CallArgOffset != OffsetAndSize::Unknown)
1101         OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
1102                             It.first.getSize());
1103       Accesses &Bin = AccessBins[OAS];
1104       for (const AAPointerInfo::Access &RAcc : It.second) {
1105         if (IsByval && !RAcc.isRead())
1106           continue;
1107         bool UsedAssumedInformation = false;
1108         Optional<Value *> Content = A.translateArgumentToCallSiteContent(
1109             RAcc.getContent(), CB, *this, UsedAssumedInformation);
1110         AccessKind AK =
1111             AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
1112                                                  : AccessKind::AK_READ_WRITE));
1113         Changed =
1114             Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
1115                                 RAcc.getType(), RAcc.getRemoteInst(), &Bin);
1116       }
1117     }
1118     return Changed;
1119   }
1120 
1121   /// Statistic tracking for all AAPointerInfo implementations.
1122   /// See AbstractAttribute::trackStatistics().
1123   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1124 };
1125 
1126 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1127   using AccessKind = AAPointerInfo::AccessKind;
1128   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1129       : AAPointerInfoImpl(IRP, A) {}
1130 
1131   /// See AbstractAttribute::initialize(...).
1132   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1133 
1134   /// Deal with an access and signal if it was handled successfully.
1135   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1136                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1137                     ChangeStatus &Changed, Type *Ty,
1138                     int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
1139     using namespace AA::PointerInfo;
1140     // No need to find a size if one is given or the offset is unknown.
1141     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1142         Ty) {
1143       const DataLayout &DL = A.getDataLayout();
1144       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1145       if (!AccessSize.isScalable())
1146         Size = AccessSize.getFixedSize();
1147     }
1148     Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
1149     return true;
1150   };
1151 
1152   /// Helper struct, will support ranges eventually.
1153   struct OffsetInfo {
1154     int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;
1155 
1156     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1157   };
1158 
1159   /// See AbstractAttribute::updateImpl(...).
1160   ChangeStatus updateImpl(Attributor &A) override {
1161     using namespace AA::PointerInfo;
1162     State S = getState();
1163     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1164     Value &AssociatedValue = getAssociatedValue();
1165 
1166     const DataLayout &DL = A.getDataLayout();
1167     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1168     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1169 
1170     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
1171                                      bool &Follow) {
1172       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1173       UsrOI = PtrOI;
1174       Follow = true;
1175       return true;
1176     };
1177 
1178     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1179       Value *CurPtr = U.get();
1180       User *Usr = U.getUser();
1181       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1182                         << *Usr << "\n");
1183 
1184       OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1185 
1186       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1187         if (CE->isCast())
1188           return HandlePassthroughUser(Usr, PtrOI, Follow);
1189         if (CE->isCompare())
1190           return true;
1191         if (!CE->isGEPWithNoNotionalOverIndexing()) {
1192           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1193                             << "\n");
1194           return false;
1195         }
1196       }
1197       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1198         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1199         UsrOI = PtrOI;
1200 
1201         // TODO: Use range information.
1202         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1203             !GEP->hasAllConstantIndices()) {
1204           UsrOI.Offset = OffsetAndSize::Unknown;
1205           Follow = true;
1206           return true;
1207         }
1208 
1209         SmallVector<Value *, 8> Indices;
1210         for (Use &Idx : llvm::make_range(GEP->idx_begin(), GEP->idx_end())) {
1211           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1212             Indices.push_back(CIdx);
1213             continue;
1214           }
1215 
1216           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1217                             << " : " << *Idx << "\n");
1218           return false;
1219         }
1220         UsrOI.Offset = PtrOI.Offset +
1221                        DL.getIndexedOffsetInType(
1222                            CurPtr->getType()->getPointerElementType(), Indices);
1223         Follow = true;
1224         return true;
1225       }
1226       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1227         return HandlePassthroughUser(Usr, PtrOI, Follow);
1228 
1229       // For PHIs we need to take care of the recurrence explicitly as the value
1230       // might change while we iterate through a loop. For now, we give up if
1231       // the PHI is not invariant.
1232       if (isa<PHINode>(Usr)) {
1233         // Check if the PHI is invariant (so far).
1234         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1235         if (UsrOI == PtrOI)
1236           return true;
1237 
1238         // Check if the PHI operand has already an unknown offset as we can't
1239         // improve on that anymore.
1240         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1241           UsrOI = PtrOI;
1242           Follow = true;
1243           return true;
1244         }
1245 
1246         // Check if the PHI operand is not dependent on the PHI itself.
1247         APInt Offset(DL.getIndexTypeSizeInBits(AssociatedValue.getType()), 0);
1248         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1249                                     DL, Offset, /* AllowNonInbounds */ true)) {
1250           if (Offset != PtrOI.Offset) {
1251             LLVM_DEBUG(dbgs()
1252                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1253                        << *CurPtr << " in " << *Usr << "\n");
1254             return false;
1255           }
1256           return HandlePassthroughUser(Usr, PtrOI, Follow);
1257         }
1258 
1259         // TODO: Approximate in case we know the direction of the recurrence.
1260         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1261                           << *CurPtr << " in " << *Usr << "\n");
1262         UsrOI = PtrOI;
1263         UsrOI.Offset = OffsetAndSize::Unknown;
1264         Follow = true;
1265         return true;
1266       }
1267 
1268       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1269         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1270                             AccessKind::AK_READ, PtrOI.Offset, Changed,
1271                             LoadI->getType());
1272       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1273         if (StoreI->getValueOperand() == CurPtr) {
1274           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1275                             << *StoreI << "\n");
1276           return false;
1277         }
1278         bool UsedAssumedInformation = false;
1279         Optional<Value *> Content = A.getAssumedSimplified(
1280             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1281         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1282                             PtrOI.Offset, Changed,
1283                             StoreI->getValueOperand()->getType());
1284       }
1285       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1286         if (CB->isLifetimeStartOrEnd())
1287           return true;
1288         if (CB->isArgOperand(&U)) {
1289           unsigned ArgNo = CB->getArgOperandNo(&U);
1290           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1291               *this, IRPosition::callsite_argument(*CB, ArgNo),
1292               DepClassTy::REQUIRED);
1293           Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
1294                     Changed;
1295           return true;
1296         }
1297         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1298                           << "\n");
1299         // TODO: Allow some call uses
1300         return false;
1301       }
1302 
1303       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1304       return false;
1305     };
1306     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1307                            /* CheckBBLivenessOnly */ true))
1308       return indicatePessimisticFixpoint();
1309 
1310     LLVM_DEBUG({
1311       dbgs() << "Accesses by bin after update:\n";
1312       for (auto &It : AccessBins) {
1313         dbgs() << "[" << It.first.getOffset() << "-"
1314                << It.first.getOffset() + It.first.getSize()
1315                << "] : " << It.getSecond().size() << "\n";
1316         for (auto &Acc : It.getSecond()) {
1317           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1318                  << "\n";
1319           if (Acc.getLocalInst() != Acc.getRemoteInst())
1320             dbgs() << "     -->                         "
1321                    << *Acc.getRemoteInst() << "\n";
1322           if (!Acc.isWrittenValueYetUndetermined())
1323             dbgs() << "     - " << Acc.getWrittenValue() << "\n";
1324         }
1325       }
1326     });
1327 
1328     return Changed;
1329   }
1330 
1331   /// See AbstractAttribute::trackStatistics()
1332   void trackStatistics() const override {
1333     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1334   }
1335 };
1336 
1337 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1338   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1339       : AAPointerInfoImpl(IRP, A) {}
1340 
1341   /// See AbstractAttribute::updateImpl(...).
1342   ChangeStatus updateImpl(Attributor &A) override {
1343     return indicatePessimisticFixpoint();
1344   }
1345 
1346   /// See AbstractAttribute::trackStatistics()
1347   void trackStatistics() const override {
1348     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1349   }
1350 };
1351 
1352 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1353   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1354       : AAPointerInfoFloating(IRP, A) {}
1355 
1356   /// See AbstractAttribute::initialize(...).
1357   void initialize(Attributor &A) override {
1358     AAPointerInfoFloating::initialize(A);
1359     if (getAnchorScope()->isDeclaration())
1360       indicatePessimisticFixpoint();
1361   }
1362 
1363   /// See AbstractAttribute::trackStatistics()
1364   void trackStatistics() const override {
1365     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1366   }
1367 };
1368 
1369 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1370   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1371       : AAPointerInfoFloating(IRP, A) {}
1372 
1373   /// See AbstractAttribute::updateImpl(...).
1374   ChangeStatus updateImpl(Attributor &A) override {
1375     using namespace AA::PointerInfo;
1376     // We handle memory intrinsics explicitly, at least the first (=
1377     // destination) and second (=source) arguments as we know how they are
1378     // accessed.
1379     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1380       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1381       int64_t LengthVal = OffsetAndSize::Unknown;
1382       if (Length)
1383         LengthVal = Length->getSExtValue();
1384       Value &Ptr = getAssociatedValue();
1385       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1386       ChangeStatus Changed;
1387       if (ArgNo == 0) {
1388         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1389                      nullptr, LengthVal);
1390       } else if (ArgNo == 1) {
1391         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1392                      nullptr, LengthVal);
1393       } else {
1394         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1395                           << *MI << "\n");
1396         return indicatePessimisticFixpoint();
1397       }
1398       return Changed;
1399     }
1400 
1401     // TODO: Once we have call site specific value information we can provide
1402     //       call site specific liveness information and then it makes
1403     //       sense to specialize attributes for call sites arguments instead of
1404     //       redirecting requests to the callee argument.
1405     Argument *Arg = getAssociatedArgument();
1406     if (!Arg)
1407       return indicatePessimisticFixpoint();
1408     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1409     auto &ArgAA =
1410         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1411     return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1412   }
1413 
1414   /// See AbstractAttribute::trackStatistics()
1415   void trackStatistics() const override {
1416     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1417   }
1418 };
1419 
1420 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1421   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1422       : AAPointerInfoFloating(IRP, A) {}
1423 
1424   /// See AbstractAttribute::trackStatistics()
1425   void trackStatistics() const override {
1426     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1427   }
1428 };
1429 
1430 /// -----------------------NoUnwind Function Attribute--------------------------
1431 
1432 struct AANoUnwindImpl : AANoUnwind {
1433   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1434 
1435   const std::string getAsStr() const override {
1436     return getAssumed() ? "nounwind" : "may-unwind";
1437   }
1438 
1439   /// See AbstractAttribute::updateImpl(...).
1440   ChangeStatus updateImpl(Attributor &A) override {
1441     auto Opcodes = {
1442         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1443         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1444         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1445 
1446     auto CheckForNoUnwind = [&](Instruction &I) {
1447       if (!I.mayThrow())
1448         return true;
1449 
1450       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1451         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1452             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1453         return NoUnwindAA.isAssumedNoUnwind();
1454       }
1455       return false;
1456     };
1457 
1458     bool UsedAssumedInformation = false;
1459     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1460                                    UsedAssumedInformation))
1461       return indicatePessimisticFixpoint();
1462 
1463     return ChangeStatus::UNCHANGED;
1464   }
1465 };
1466 
1467 struct AANoUnwindFunction final : public AANoUnwindImpl {
1468   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1469       : AANoUnwindImpl(IRP, A) {}
1470 
1471   /// See AbstractAttribute::trackStatistics()
1472   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1473 };
1474 
1475 /// NoUnwind attribute deduction for a call sites.
1476 struct AANoUnwindCallSite final : AANoUnwindImpl {
1477   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1478       : AANoUnwindImpl(IRP, A) {}
1479 
1480   /// See AbstractAttribute::initialize(...).
1481   void initialize(Attributor &A) override {
1482     AANoUnwindImpl::initialize(A);
1483     Function *F = getAssociatedFunction();
1484     if (!F || F->isDeclaration())
1485       indicatePessimisticFixpoint();
1486   }
1487 
1488   /// See AbstractAttribute::updateImpl(...).
1489   ChangeStatus updateImpl(Attributor &A) override {
1490     // TODO: Once we have call site specific value information we can provide
1491     //       call site specific liveness information and then it makes
1492     //       sense to specialize attributes for call sites arguments instead of
1493     //       redirecting requests to the callee argument.
1494     Function *F = getAssociatedFunction();
1495     const IRPosition &FnPos = IRPosition::function(*F);
1496     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1497     return clampStateAndIndicateChange(getState(), FnAA.getState());
1498   }
1499 
1500   /// See AbstractAttribute::trackStatistics()
1501   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1502 };
1503 
1504 /// --------------------- Function Return Values -------------------------------
1505 
1506 /// "Attribute" that collects all potential returned values and the return
1507 /// instructions that they arise from.
1508 ///
1509 /// If there is a unique returned value R, the manifest method will:
1510 ///   - mark R with the "returned" attribute, if R is an argument.
1511 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1512 
1513   /// Mapping of values potentially returned by the associated function to the
1514   /// return instructions that might return them.
1515   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1516 
1517   /// State flags
1518   ///
1519   ///{
1520   bool IsFixed = false;
1521   bool IsValidState = true;
1522   ///}
1523 
1524 public:
1525   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1526       : AAReturnedValues(IRP, A) {}
1527 
1528   /// See AbstractAttribute::initialize(...).
1529   void initialize(Attributor &A) override {
1530     // Reset the state.
1531     IsFixed = false;
1532     IsValidState = true;
1533     ReturnedValues.clear();
1534 
1535     Function *F = getAssociatedFunction();
1536     if (!F || F->isDeclaration()) {
1537       indicatePessimisticFixpoint();
1538       return;
1539     }
1540     assert(!F->getReturnType()->isVoidTy() &&
1541            "Did not expect a void return type!");
1542 
1543     // The map from instruction opcodes to those instructions in the function.
1544     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1545 
1546     // Look through all arguments, if one is marked as returned we are done.
1547     for (Argument &Arg : F->args()) {
1548       if (Arg.hasReturnedAttr()) {
1549         auto &ReturnInstSet = ReturnedValues[&Arg];
1550         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1551           for (Instruction *RI : *Insts)
1552             ReturnInstSet.insert(cast<ReturnInst>(RI));
1553 
1554         indicateOptimisticFixpoint();
1555         return;
1556       }
1557     }
1558 
1559     if (!A.isFunctionIPOAmendable(*F))
1560       indicatePessimisticFixpoint();
1561   }
1562 
1563   /// See AbstractAttribute::manifest(...).
1564   ChangeStatus manifest(Attributor &A) override;
1565 
1566   /// See AbstractAttribute::getState(...).
1567   AbstractState &getState() override { return *this; }
1568 
1569   /// See AbstractAttribute::getState(...).
1570   const AbstractState &getState() const override { return *this; }
1571 
1572   /// See AbstractAttribute::updateImpl(Attributor &A).
1573   ChangeStatus updateImpl(Attributor &A) override;
1574 
1575   llvm::iterator_range<iterator> returned_values() override {
1576     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1577   }
1578 
1579   llvm::iterator_range<const_iterator> returned_values() const override {
1580     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1581   }
1582 
1583   /// Return the number of potential return values, -1 if unknown.
1584   size_t getNumReturnValues() const override {
1585     return isValidState() ? ReturnedValues.size() : -1;
1586   }
1587 
1588   /// Return an assumed unique return value if a single candidate is found. If
1589   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1590   /// Optional::NoneType.
1591   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1592 
1593   /// See AbstractState::checkForAllReturnedValues(...).
1594   bool checkForAllReturnedValuesAndReturnInsts(
1595       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1596       const override;
1597 
1598   /// Pretty print the attribute similar to the IR representation.
1599   const std::string getAsStr() const override;
1600 
1601   /// See AbstractState::isAtFixpoint().
1602   bool isAtFixpoint() const override { return IsFixed; }
1603 
1604   /// See AbstractState::isValidState().
1605   bool isValidState() const override { return IsValidState; }
1606 
1607   /// See AbstractState::indicateOptimisticFixpoint(...).
1608   ChangeStatus indicateOptimisticFixpoint() override {
1609     IsFixed = true;
1610     return ChangeStatus::UNCHANGED;
1611   }
1612 
1613   ChangeStatus indicatePessimisticFixpoint() override {
1614     IsFixed = true;
1615     IsValidState = false;
1616     return ChangeStatus::CHANGED;
1617   }
1618 };
1619 
1620 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1621   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1622 
1623   // Bookkeeping.
1624   assert(isValidState());
1625   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1626                   "Number of function with known return values");
1627 
1628   // Check if we have an assumed unique return value that we could manifest.
1629   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1630 
1631   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1632     return Changed;
1633 
1634   // Bookkeeping.
1635   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1636                   "Number of function with unique return");
1637   // If the assumed unique return value is an argument, annotate it.
1638   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1639     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1640             getAssociatedFunction()->getReturnType())) {
1641       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1642       Changed = IRAttribute::manifest(A);
1643     }
1644   }
1645   return Changed;
1646 }
1647 
1648 const std::string AAReturnedValuesImpl::getAsStr() const {
1649   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1650          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1651 }
1652 
1653 Optional<Value *>
1654 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1655   // If checkForAllReturnedValues provides a unique value, ignoring potential
1656   // undef values that can also be present, it is assumed to be the actual
1657   // return value and forwarded to the caller of this method. If there are
1658   // multiple, a nullptr is returned indicating there cannot be a unique
1659   // returned value.
1660   Optional<Value *> UniqueRV;
1661   Type *Ty = getAssociatedFunction()->getReturnType();
1662 
1663   auto Pred = [&](Value &RV) -> bool {
1664     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1665     return UniqueRV != Optional<Value *>(nullptr);
1666   };
1667 
1668   if (!A.checkForAllReturnedValues(Pred, *this))
1669     UniqueRV = nullptr;
1670 
1671   return UniqueRV;
1672 }
1673 
1674 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1675     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1676     const {
1677   if (!isValidState())
1678     return false;
1679 
1680   // Check all returned values but ignore call sites as long as we have not
1681   // encountered an overdefined one during an update.
1682   for (auto &It : ReturnedValues) {
1683     Value *RV = It.first;
1684     if (!Pred(*RV, It.second))
1685       return false;
1686   }
1687 
1688   return true;
1689 }
1690 
1691 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1692   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1693 
1694   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1695                            bool) -> bool {
1696     bool UsedAssumedInformation = false;
1697     Optional<Value *> SimpleRetVal =
1698         A.getAssumedSimplified(V, *this, UsedAssumedInformation);
1699     if (!SimpleRetVal.hasValue())
1700       return true;
1701     if (!SimpleRetVal.getValue())
1702       return false;
1703     Value *RetVal = *SimpleRetVal;
1704     assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
1705            "Assumed returned value should be valid in function scope!");
1706     if (ReturnedValues[RetVal].insert(&Ret))
1707       Changed = ChangeStatus::CHANGED;
1708     return true;
1709   };
1710 
1711   auto ReturnInstCB = [&](Instruction &I) {
1712     ReturnInst &Ret = cast<ReturnInst>(I);
1713     return genericValueTraversal<ReturnInst>(
1714         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1715         &I);
1716   };
1717 
1718   // Discover returned values from all live returned instructions in the
1719   // associated function.
1720   bool UsedAssumedInformation = false;
1721   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1722                                  UsedAssumedInformation))
1723     return indicatePessimisticFixpoint();
1724   return Changed;
1725 }
1726 
1727 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1728   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1729       : AAReturnedValuesImpl(IRP, A) {}
1730 
1731   /// See AbstractAttribute::trackStatistics()
1732   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1733 };
1734 
1735 /// Returned values information for a call sites.
1736 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1737   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1738       : AAReturnedValuesImpl(IRP, A) {}
1739 
1740   /// See AbstractAttribute::initialize(...).
1741   void initialize(Attributor &A) override {
1742     // TODO: Once we have call site specific value information we can provide
1743     //       call site specific liveness information and then it makes
1744     //       sense to specialize attributes for call sites instead of
1745     //       redirecting requests to the callee.
1746     llvm_unreachable("Abstract attributes for returned values are not "
1747                      "supported for call sites yet!");
1748   }
1749 
1750   /// See AbstractAttribute::updateImpl(...).
1751   ChangeStatus updateImpl(Attributor &A) override {
1752     return indicatePessimisticFixpoint();
1753   }
1754 
1755   /// See AbstractAttribute::trackStatistics()
1756   void trackStatistics() const override {}
1757 };
1758 
1759 /// ------------------------ NoSync Function Attribute -------------------------
1760 
1761 struct AANoSyncImpl : AANoSync {
1762   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1763 
1764   const std::string getAsStr() const override {
1765     return getAssumed() ? "nosync" : "may-sync";
1766   }
1767 
1768   /// See AbstractAttribute::updateImpl(...).
1769   ChangeStatus updateImpl(Attributor &A) override;
1770 
1771   /// Helper function used to determine whether an instruction is non-relaxed
1772   /// atomic. In other words, if an atomic instruction does not have unordered
1773   /// or monotonic ordering
1774   static bool isNonRelaxedAtomic(Instruction *I);
1775 
1776   /// Helper function specific for intrinsics which are potentially volatile
1777   static bool isNoSyncIntrinsic(Instruction *I);
1778 };
1779 
1780 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1781   if (!I->isAtomic())
1782     return false;
1783 
1784   if (auto *FI = dyn_cast<FenceInst>(I))
1785     // All legal orderings for fence are stronger than monotonic.
1786     return FI->getSyncScopeID() != SyncScope::SingleThread;
1787   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1788     // Unordered is not a legal ordering for cmpxchg.
1789     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1790             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1791   }
1792 
1793   AtomicOrdering Ordering;
1794   switch (I->getOpcode()) {
1795   case Instruction::AtomicRMW:
1796     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1797     break;
1798   case Instruction::Store:
1799     Ordering = cast<StoreInst>(I)->getOrdering();
1800     break;
1801   case Instruction::Load:
1802     Ordering = cast<LoadInst>(I)->getOrdering();
1803     break;
1804   default:
1805     llvm_unreachable(
1806         "New atomic operations need to be known in the attributor.");
1807   }
1808 
1809   return (Ordering != AtomicOrdering::Unordered &&
1810           Ordering != AtomicOrdering::Monotonic);
1811 }
1812 
1813 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1814 /// which would be nosync except that they have a volatile flag.  All other
1815 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1816 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1817   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1818     return !MI->isVolatile();
1819   return false;
1820 }
1821 
1822 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1823 
1824   auto CheckRWInstForNoSync = [&](Instruction &I) {
1825     /// We are looking for volatile instructions or Non-Relaxed atomics.
1826 
1827     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1828       if (CB->hasFnAttr(Attribute::NoSync))
1829         return true;
1830 
1831       if (isNoSyncIntrinsic(&I))
1832         return true;
1833 
1834       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1835           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1836       return NoSyncAA.isAssumedNoSync();
1837     }
1838 
1839     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1840       return true;
1841 
1842     return false;
1843   };
1844 
1845   auto CheckForNoSync = [&](Instruction &I) {
1846     // At this point we handled all read/write effects and they are all
1847     // nosync, so they can be skipped.
1848     if (I.mayReadOrWriteMemory())
1849       return true;
1850 
1851     // non-convergent and readnone imply nosync.
1852     return !cast<CallBase>(I).isConvergent();
1853   };
1854 
1855   bool UsedAssumedInformation = false;
1856   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1857                                           UsedAssumedInformation) ||
1858       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1859                                          UsedAssumedInformation))
1860     return indicatePessimisticFixpoint();
1861 
1862   return ChangeStatus::UNCHANGED;
1863 }
1864 
1865 struct AANoSyncFunction final : public AANoSyncImpl {
1866   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1867       : AANoSyncImpl(IRP, A) {}
1868 
1869   /// See AbstractAttribute::trackStatistics()
1870   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1871 };
1872 
1873 /// NoSync attribute deduction for a call sites.
1874 struct AANoSyncCallSite final : AANoSyncImpl {
1875   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1876       : AANoSyncImpl(IRP, A) {}
1877 
1878   /// See AbstractAttribute::initialize(...).
1879   void initialize(Attributor &A) override {
1880     AANoSyncImpl::initialize(A);
1881     Function *F = getAssociatedFunction();
1882     if (!F || F->isDeclaration())
1883       indicatePessimisticFixpoint();
1884   }
1885 
1886   /// See AbstractAttribute::updateImpl(...).
1887   ChangeStatus updateImpl(Attributor &A) override {
1888     // TODO: Once we have call site specific value information we can provide
1889     //       call site specific liveness information and then it makes
1890     //       sense to specialize attributes for call sites arguments instead of
1891     //       redirecting requests to the callee argument.
1892     Function *F = getAssociatedFunction();
1893     const IRPosition &FnPos = IRPosition::function(*F);
1894     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1895     return clampStateAndIndicateChange(getState(), FnAA.getState());
1896   }
1897 
1898   /// See AbstractAttribute::trackStatistics()
1899   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1900 };
1901 
1902 /// ------------------------ No-Free Attributes ----------------------------
1903 
1904 struct AANoFreeImpl : public AANoFree {
1905   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1906 
1907   /// See AbstractAttribute::updateImpl(...).
1908   ChangeStatus updateImpl(Attributor &A) override {
1909     auto CheckForNoFree = [&](Instruction &I) {
1910       const auto &CB = cast<CallBase>(I);
1911       if (CB.hasFnAttr(Attribute::NoFree))
1912         return true;
1913 
1914       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1915           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1916       return NoFreeAA.isAssumedNoFree();
1917     };
1918 
1919     bool UsedAssumedInformation = false;
1920     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1921                                            UsedAssumedInformation))
1922       return indicatePessimisticFixpoint();
1923     return ChangeStatus::UNCHANGED;
1924   }
1925 
1926   /// See AbstractAttribute::getAsStr().
1927   const std::string getAsStr() const override {
1928     return getAssumed() ? "nofree" : "may-free";
1929   }
1930 };
1931 
1932 struct AANoFreeFunction final : public AANoFreeImpl {
1933   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1934       : AANoFreeImpl(IRP, A) {}
1935 
1936   /// See AbstractAttribute::trackStatistics()
1937   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1938 };
1939 
1940 /// NoFree attribute deduction for a call sites.
1941 struct AANoFreeCallSite final : AANoFreeImpl {
1942   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1943       : AANoFreeImpl(IRP, A) {}
1944 
1945   /// See AbstractAttribute::initialize(...).
1946   void initialize(Attributor &A) override {
1947     AANoFreeImpl::initialize(A);
1948     Function *F = getAssociatedFunction();
1949     if (!F || F->isDeclaration())
1950       indicatePessimisticFixpoint();
1951   }
1952 
1953   /// See AbstractAttribute::updateImpl(...).
1954   ChangeStatus updateImpl(Attributor &A) override {
1955     // TODO: Once we have call site specific value information we can provide
1956     //       call site specific liveness information and then it makes
1957     //       sense to specialize attributes for call sites arguments instead of
1958     //       redirecting requests to the callee argument.
1959     Function *F = getAssociatedFunction();
1960     const IRPosition &FnPos = IRPosition::function(*F);
1961     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1962     return clampStateAndIndicateChange(getState(), FnAA.getState());
1963   }
1964 
1965   /// See AbstractAttribute::trackStatistics()
1966   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1967 };
1968 
1969 /// NoFree attribute for floating values.
1970 struct AANoFreeFloating : AANoFreeImpl {
1971   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1972       : AANoFreeImpl(IRP, A) {}
1973 
1974   /// See AbstractAttribute::trackStatistics()
1975   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1976 
1977   /// See Abstract Attribute::updateImpl(...).
1978   ChangeStatus updateImpl(Attributor &A) override {
1979     const IRPosition &IRP = getIRPosition();
1980 
1981     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1982         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1983     if (NoFreeAA.isAssumedNoFree())
1984       return ChangeStatus::UNCHANGED;
1985 
1986     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1987     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1988       Instruction *UserI = cast<Instruction>(U.getUser());
1989       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1990         if (CB->isBundleOperand(&U))
1991           return false;
1992         if (!CB->isArgOperand(&U))
1993           return true;
1994         unsigned ArgNo = CB->getArgOperandNo(&U);
1995 
1996         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1997             *this, IRPosition::callsite_argument(*CB, ArgNo),
1998             DepClassTy::REQUIRED);
1999         return NoFreeArg.isAssumedNoFree();
2000       }
2001 
2002       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
2003           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
2004         Follow = true;
2005         return true;
2006       }
2007       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
2008           isa<ReturnInst>(UserI))
2009         return true;
2010 
2011       // Unknown user.
2012       return false;
2013     };
2014     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
2015       return indicatePessimisticFixpoint();
2016 
2017     return ChangeStatus::UNCHANGED;
2018   }
2019 };
2020 
2021 /// NoFree attribute for a call site argument.
2022 struct AANoFreeArgument final : AANoFreeFloating {
2023   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2024       : AANoFreeFloating(IRP, A) {}
2025 
2026   /// See AbstractAttribute::trackStatistics()
2027   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2028 };
2029 
2030 /// NoFree attribute for call site arguments.
2031 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2032   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2033       : AANoFreeFloating(IRP, A) {}
2034 
2035   /// See AbstractAttribute::updateImpl(...).
2036   ChangeStatus updateImpl(Attributor &A) override {
2037     // TODO: Once we have call site specific value information we can provide
2038     //       call site specific liveness information and then it makes
2039     //       sense to specialize attributes for call sites arguments instead of
2040     //       redirecting requests to the callee argument.
2041     Argument *Arg = getAssociatedArgument();
2042     if (!Arg)
2043       return indicatePessimisticFixpoint();
2044     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2045     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2046     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2047   }
2048 
2049   /// See AbstractAttribute::trackStatistics()
2050   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2051 };
2052 
2053 /// NoFree attribute for function return value.
2054 struct AANoFreeReturned final : AANoFreeFloating {
2055   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2056       : AANoFreeFloating(IRP, A) {
2057     llvm_unreachable("NoFree is not applicable to function returns!");
2058   }
2059 
2060   /// See AbstractAttribute::initialize(...).
2061   void initialize(Attributor &A) override {
2062     llvm_unreachable("NoFree is not applicable to function returns!");
2063   }
2064 
2065   /// See AbstractAttribute::updateImpl(...).
2066   ChangeStatus updateImpl(Attributor &A) override {
2067     llvm_unreachable("NoFree is not applicable to function returns!");
2068   }
2069 
2070   /// See AbstractAttribute::trackStatistics()
2071   void trackStatistics() const override {}
2072 };
2073 
2074 /// NoFree attribute deduction for a call site return value.
2075 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2076   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2077       : AANoFreeFloating(IRP, A) {}
2078 
2079   ChangeStatus manifest(Attributor &A) override {
2080     return ChangeStatus::UNCHANGED;
2081   }
2082   /// See AbstractAttribute::trackStatistics()
2083   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2084 };
2085 
2086 /// ------------------------ NonNull Argument Attribute ------------------------
2087 static int64_t getKnownNonNullAndDerefBytesForUse(
2088     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2089     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2090   TrackUse = false;
2091 
2092   const Value *UseV = U->get();
2093   if (!UseV->getType()->isPointerTy())
2094     return 0;
2095 
2096   // We need to follow common pointer manipulation uses to the accesses they
2097   // feed into. We can try to be smart to avoid looking through things we do not
2098   // like for now, e.g., non-inbounds GEPs.
2099   if (isa<CastInst>(I)) {
2100     TrackUse = true;
2101     return 0;
2102   }
2103 
2104   if (isa<GetElementPtrInst>(I)) {
2105     TrackUse = true;
2106     return 0;
2107   }
2108 
2109   Type *PtrTy = UseV->getType();
2110   const Function *F = I->getFunction();
2111   bool NullPointerIsDefined =
2112       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2113   const DataLayout &DL = A.getInfoCache().getDL();
2114   if (const auto *CB = dyn_cast<CallBase>(I)) {
2115     if (CB->isBundleOperand(U)) {
2116       if (RetainedKnowledge RK = getKnowledgeFromUse(
2117               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2118         IsNonNull |=
2119             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2120         return RK.ArgValue;
2121       }
2122       return 0;
2123     }
2124 
2125     if (CB->isCallee(U)) {
2126       IsNonNull |= !NullPointerIsDefined;
2127       return 0;
2128     }
2129 
2130     unsigned ArgNo = CB->getArgOperandNo(U);
2131     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2132     // As long as we only use known information there is no need to track
2133     // dependences here.
2134     auto &DerefAA =
2135         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2136     IsNonNull |= DerefAA.isKnownNonNull();
2137     return DerefAA.getKnownDereferenceableBytes();
2138   }
2139 
2140   int64_t Offset;
2141   const Value *Base =
2142       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
2143   if (Base) {
2144     if (Base == &AssociatedValue &&
2145         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2146       int64_t DerefBytes =
2147           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2148 
2149       IsNonNull |= !NullPointerIsDefined;
2150       return std::max(int64_t(0), DerefBytes);
2151     }
2152   }
2153 
2154   /// Corner case when an offset is 0.
2155   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
2156                                               /*AllowNonInbounds*/ true);
2157   if (Base) {
2158     if (Offset == 0 && Base == &AssociatedValue &&
2159         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2160       int64_t DerefBytes =
2161           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2162       IsNonNull |= !NullPointerIsDefined;
2163       return std::max(int64_t(0), DerefBytes);
2164     }
2165   }
2166 
2167   return 0;
2168 }
2169 
2170 struct AANonNullImpl : AANonNull {
2171   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2172       : AANonNull(IRP, A),
2173         NullIsDefined(NullPointerIsDefined(
2174             getAnchorScope(),
2175             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2176 
2177   /// See AbstractAttribute::initialize(...).
2178   void initialize(Attributor &A) override {
2179     Value &V = getAssociatedValue();
2180     if (!NullIsDefined &&
2181         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2182                 /* IgnoreSubsumingPositions */ false, &A)) {
2183       indicateOptimisticFixpoint();
2184       return;
2185     }
2186 
2187     if (isa<ConstantPointerNull>(V)) {
2188       indicatePessimisticFixpoint();
2189       return;
2190     }
2191 
2192     AANonNull::initialize(A);
2193 
2194     bool CanBeNull, CanBeFreed;
2195     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2196                                          CanBeFreed)) {
2197       if (!CanBeNull) {
2198         indicateOptimisticFixpoint();
2199         return;
2200       }
2201     }
2202 
2203     if (isa<GlobalValue>(&getAssociatedValue())) {
2204       indicatePessimisticFixpoint();
2205       return;
2206     }
2207 
2208     if (Instruction *CtxI = getCtxI())
2209       followUsesInMBEC(*this, A, getState(), *CtxI);
2210   }
2211 
2212   /// See followUsesInMBEC
2213   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2214                        AANonNull::StateType &State) {
2215     bool IsNonNull = false;
2216     bool TrackUse = false;
2217     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2218                                        IsNonNull, TrackUse);
2219     State.setKnown(IsNonNull);
2220     return TrackUse;
2221   }
2222 
2223   /// See AbstractAttribute::getAsStr().
2224   const std::string getAsStr() const override {
2225     return getAssumed() ? "nonnull" : "may-null";
2226   }
2227 
2228   /// Flag to determine if the underlying value can be null and still allow
2229   /// valid accesses.
2230   const bool NullIsDefined;
2231 };
2232 
2233 /// NonNull attribute for a floating value.
2234 struct AANonNullFloating : public AANonNullImpl {
2235   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2236       : AANonNullImpl(IRP, A) {}
2237 
2238   /// See AbstractAttribute::updateImpl(...).
2239   ChangeStatus updateImpl(Attributor &A) override {
2240     const DataLayout &DL = A.getDataLayout();
2241 
2242     DominatorTree *DT = nullptr;
2243     AssumptionCache *AC = nullptr;
2244     InformationCache &InfoCache = A.getInfoCache();
2245     if (const Function *Fn = getAnchorScope()) {
2246       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2247       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2248     }
2249 
2250     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2251                             AANonNull::StateType &T, bool Stripped) -> bool {
2252       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2253                                              DepClassTy::REQUIRED);
2254       if (!Stripped && this == &AA) {
2255         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2256           T.indicatePessimisticFixpoint();
2257       } else {
2258         // Use abstract attribute information.
2259         const AANonNull::StateType &NS = AA.getState();
2260         T ^= NS;
2261       }
2262       return T.isValidState();
2263     };
2264 
2265     StateType T;
2266     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2267                                           VisitValueCB, getCtxI()))
2268       return indicatePessimisticFixpoint();
2269 
2270     return clampStateAndIndicateChange(getState(), T);
2271   }
2272 
2273   /// See AbstractAttribute::trackStatistics()
2274   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2275 };
2276 
2277 /// NonNull attribute for function return value.
2278 struct AANonNullReturned final
2279     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2280   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2281       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2282 
2283   /// See AbstractAttribute::getAsStr().
2284   const std::string getAsStr() const override {
2285     return getAssumed() ? "nonnull" : "may-null";
2286   }
2287 
2288   /// See AbstractAttribute::trackStatistics()
2289   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2290 };
2291 
2292 /// NonNull attribute for function argument.
2293 struct AANonNullArgument final
2294     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2295   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2296       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2297 
2298   /// See AbstractAttribute::trackStatistics()
2299   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2300 };
2301 
2302 struct AANonNullCallSiteArgument final : AANonNullFloating {
2303   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2304       : AANonNullFloating(IRP, A) {}
2305 
2306   /// See AbstractAttribute::trackStatistics()
2307   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2308 };
2309 
2310 /// NonNull attribute for a call site return position.
2311 struct AANonNullCallSiteReturned final
2312     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2313   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2314       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2315 
2316   /// See AbstractAttribute::trackStatistics()
2317   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2318 };
2319 
2320 /// ------------------------ No-Recurse Attributes ----------------------------
2321 
2322 struct AANoRecurseImpl : public AANoRecurse {
2323   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2324 
2325   /// See AbstractAttribute::getAsStr()
2326   const std::string getAsStr() const override {
2327     return getAssumed() ? "norecurse" : "may-recurse";
2328   }
2329 };
2330 
2331 struct AANoRecurseFunction final : AANoRecurseImpl {
2332   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2333       : AANoRecurseImpl(IRP, A) {}
2334 
2335   /// See AbstractAttribute::initialize(...).
2336   void initialize(Attributor &A) override {
2337     AANoRecurseImpl::initialize(A);
2338     if (const Function *F = getAnchorScope())
2339       if (A.getInfoCache().getSccSize(*F) != 1)
2340         indicatePessimisticFixpoint();
2341   }
2342 
2343   /// See AbstractAttribute::updateImpl(...).
2344   ChangeStatus updateImpl(Attributor &A) override {
2345 
2346     // If all live call sites are known to be no-recurse, we are as well.
2347     auto CallSitePred = [&](AbstractCallSite ACS) {
2348       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2349           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2350           DepClassTy::NONE);
2351       return NoRecurseAA.isKnownNoRecurse();
2352     };
2353     bool AllCallSitesKnown;
2354     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2355       // If we know all call sites and all are known no-recurse, we are done.
2356       // If all known call sites, which might not be all that exist, are known
2357       // to be no-recurse, we are not done but we can continue to assume
2358       // no-recurse. If one of the call sites we have not visited will become
2359       // live, another update is triggered.
2360       if (AllCallSitesKnown)
2361         indicateOptimisticFixpoint();
2362       return ChangeStatus::UNCHANGED;
2363     }
2364 
2365     // If the above check does not hold anymore we look at the calls.
2366     auto CheckForNoRecurse = [&](Instruction &I) {
2367       const auto &CB = cast<CallBase>(I);
2368       if (CB.hasFnAttr(Attribute::NoRecurse))
2369         return true;
2370 
2371       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2372           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2373       if (!NoRecurseAA.isAssumedNoRecurse())
2374         return false;
2375 
2376       // Recursion to the same function
2377       if (CB.getCalledFunction() == getAnchorScope())
2378         return false;
2379 
2380       return true;
2381     };
2382 
2383     bool UsedAssumedInformation = false;
2384     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
2385                                            UsedAssumedInformation))
2386       return indicatePessimisticFixpoint();
2387     return ChangeStatus::UNCHANGED;
2388   }
2389 
2390   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2391 };
2392 
2393 /// NoRecurse attribute deduction for a call sites.
2394 struct AANoRecurseCallSite final : AANoRecurseImpl {
2395   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2396       : AANoRecurseImpl(IRP, A) {}
2397 
2398   /// See AbstractAttribute::initialize(...).
2399   void initialize(Attributor &A) override {
2400     AANoRecurseImpl::initialize(A);
2401     Function *F = getAssociatedFunction();
2402     if (!F || F->isDeclaration())
2403       indicatePessimisticFixpoint();
2404   }
2405 
2406   /// See AbstractAttribute::updateImpl(...).
2407   ChangeStatus updateImpl(Attributor &A) override {
2408     // TODO: Once we have call site specific value information we can provide
2409     //       call site specific liveness information and then it makes
2410     //       sense to specialize attributes for call sites arguments instead of
2411     //       redirecting requests to the callee argument.
2412     Function *F = getAssociatedFunction();
2413     const IRPosition &FnPos = IRPosition::function(*F);
2414     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2415     return clampStateAndIndicateChange(getState(), FnAA.getState());
2416   }
2417 
2418   /// See AbstractAttribute::trackStatistics()
2419   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2420 };
2421 
2422 /// -------------------- Undefined-Behavior Attributes ------------------------
2423 
2424 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2425   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2426       : AAUndefinedBehavior(IRP, A) {}
2427 
2428   /// See AbstractAttribute::updateImpl(...).
2429   // through a pointer (i.e. also branches etc.)
2430   ChangeStatus updateImpl(Attributor &A) override {
2431     const size_t UBPrevSize = KnownUBInsts.size();
2432     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2433 
2434     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2435       // Lang ref now states volatile store is not UB, let's skip them.
2436       if (I.isVolatile() && I.mayWriteToMemory())
2437         return true;
2438 
2439       // Skip instructions that are already saved.
2440       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2441         return true;
2442 
2443       // If we reach here, we know we have an instruction
2444       // that accesses memory through a pointer operand,
2445       // for which getPointerOperand() should give it to us.
2446       Value *PtrOp =
2447           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2448       assert(PtrOp &&
2449              "Expected pointer operand of memory accessing instruction");
2450 
2451       // Either we stopped and the appropriate action was taken,
2452       // or we got back a simplified value to continue.
2453       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2454       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2455         return true;
2456       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2457 
2458       // A memory access through a pointer is considered UB
2459       // only if the pointer has constant null value.
2460       // TODO: Expand it to not only check constant values.
2461       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2462         AssumedNoUBInsts.insert(&I);
2463         return true;
2464       }
2465       const Type *PtrTy = PtrOpVal->getType();
2466 
2467       // Because we only consider instructions inside functions,
2468       // assume that a parent function exists.
2469       const Function *F = I.getFunction();
2470 
2471       // A memory access using constant null pointer is only considered UB
2472       // if null pointer is _not_ defined for the target platform.
2473       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2474         AssumedNoUBInsts.insert(&I);
2475       else
2476         KnownUBInsts.insert(&I);
2477       return true;
2478     };
2479 
2480     auto InspectBrInstForUB = [&](Instruction &I) {
2481       // A conditional branch instruction is considered UB if it has `undef`
2482       // condition.
2483 
2484       // Skip instructions that are already saved.
2485       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2486         return true;
2487 
2488       // We know we have a branch instruction.
2489       auto *BrInst = cast<BranchInst>(&I);
2490 
2491       // Unconditional branches are never considered UB.
2492       if (BrInst->isUnconditional())
2493         return true;
2494 
2495       // Either we stopped and the appropriate action was taken,
2496       // or we got back a simplified value to continue.
2497       Optional<Value *> SimplifiedCond =
2498           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2499       if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2500         return true;
2501       AssumedNoUBInsts.insert(&I);
2502       return true;
2503     };
2504 
2505     auto InspectCallSiteForUB = [&](Instruction &I) {
2506       // Check whether a callsite always cause UB or not
2507 
2508       // Skip instructions that are already saved.
2509       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2510         return true;
2511 
2512       // Check nonnull and noundef argument attribute violation for each
2513       // callsite.
2514       CallBase &CB = cast<CallBase>(I);
2515       Function *Callee = CB.getCalledFunction();
2516       if (!Callee)
2517         return true;
2518       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2519         // If current argument is known to be simplified to null pointer and the
2520         // corresponding argument position is known to have nonnull attribute,
2521         // the argument is poison. Furthermore, if the argument is poison and
2522         // the position is known to have noundef attriubte, this callsite is
2523         // considered UB.
2524         if (idx >= Callee->arg_size())
2525           break;
2526         Value *ArgVal = CB.getArgOperand(idx);
2527         if (!ArgVal)
2528           continue;
2529         // Here, we handle three cases.
2530         //   (1) Not having a value means it is dead. (we can replace the value
2531         //       with undef)
2532         //   (2) Simplified to undef. The argument violate noundef attriubte.
2533         //   (3) Simplified to null pointer where known to be nonnull.
2534         //       The argument is a poison value and violate noundef attribute.
2535         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2536         auto &NoUndefAA =
2537             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2538         if (!NoUndefAA.isKnownNoUndef())
2539           continue;
2540         bool UsedAssumedInformation = false;
2541         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2542             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2543         if (UsedAssumedInformation)
2544           continue;
2545         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2546           return true;
2547         if (!SimplifiedVal.hasValue() ||
2548             isa<UndefValue>(*SimplifiedVal.getValue())) {
2549           KnownUBInsts.insert(&I);
2550           continue;
2551         }
2552         if (!ArgVal->getType()->isPointerTy() ||
2553             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2554           continue;
2555         auto &NonNullAA =
2556             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2557         if (NonNullAA.isKnownNonNull())
2558           KnownUBInsts.insert(&I);
2559       }
2560       return true;
2561     };
2562 
2563     auto InspectReturnInstForUB =
2564         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2565           // Check if a return instruction always cause UB or not
2566           // Note: It is guaranteed that the returned position of the anchor
2567           //       scope has noundef attribute when this is called.
2568           //       We also ensure the return position is not "assumed dead"
2569           //       because the returned value was then potentially simplified to
2570           //       `undef` in AAReturnedValues without removing the `noundef`
2571           //       attribute yet.
2572 
2573           // When the returned position has noundef attriubte, UB occur in the
2574           // following cases.
2575           //   (1) Returned value is known to be undef.
2576           //   (2) The value is known to be a null pointer and the returned
2577           //       position has nonnull attribute (because the returned value is
2578           //       poison).
2579           bool FoundUB = false;
2580           if (isa<UndefValue>(V)) {
2581             FoundUB = true;
2582           } else {
2583             if (isa<ConstantPointerNull>(V)) {
2584               auto &NonNullAA = A.getAAFor<AANonNull>(
2585                   *this, IRPosition::returned(*getAnchorScope()),
2586                   DepClassTy::NONE);
2587               if (NonNullAA.isKnownNonNull())
2588                 FoundUB = true;
2589             }
2590           }
2591 
2592           if (FoundUB)
2593             for (ReturnInst *RI : RetInsts)
2594               KnownUBInsts.insert(RI);
2595           return true;
2596         };
2597 
2598     bool UsedAssumedInformation = false;
2599     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2600                               {Instruction::Load, Instruction::Store,
2601                                Instruction::AtomicCmpXchg,
2602                                Instruction::AtomicRMW},
2603                               UsedAssumedInformation,
2604                               /* CheckBBLivenessOnly */ true);
2605     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2606                               UsedAssumedInformation,
2607                               /* CheckBBLivenessOnly */ true);
2608     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2609                                       UsedAssumedInformation);
2610 
2611     // If the returned position of the anchor scope has noundef attriubte, check
2612     // all returned instructions.
2613     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2614       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2615       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2616         auto &RetPosNoUndefAA =
2617             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2618         if (RetPosNoUndefAA.isKnownNoUndef())
2619           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2620                                                     *this);
2621       }
2622     }
2623 
2624     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2625         UBPrevSize != KnownUBInsts.size())
2626       return ChangeStatus::CHANGED;
2627     return ChangeStatus::UNCHANGED;
2628   }
2629 
2630   bool isKnownToCauseUB(Instruction *I) const override {
2631     return KnownUBInsts.count(I);
2632   }
2633 
2634   bool isAssumedToCauseUB(Instruction *I) const override {
2635     // In simple words, if an instruction is not in the assumed to _not_
2636     // cause UB, then it is assumed UB (that includes those
2637     // in the KnownUBInsts set). The rest is boilerplate
2638     // is to ensure that it is one of the instructions we test
2639     // for UB.
2640 
2641     switch (I->getOpcode()) {
2642     case Instruction::Load:
2643     case Instruction::Store:
2644     case Instruction::AtomicCmpXchg:
2645     case Instruction::AtomicRMW:
2646       return !AssumedNoUBInsts.count(I);
2647     case Instruction::Br: {
2648       auto BrInst = cast<BranchInst>(I);
2649       if (BrInst->isUnconditional())
2650         return false;
2651       return !AssumedNoUBInsts.count(I);
2652     } break;
2653     default:
2654       return false;
2655     }
2656     return false;
2657   }
2658 
2659   ChangeStatus manifest(Attributor &A) override {
2660     if (KnownUBInsts.empty())
2661       return ChangeStatus::UNCHANGED;
2662     for (Instruction *I : KnownUBInsts)
2663       A.changeToUnreachableAfterManifest(I);
2664     return ChangeStatus::CHANGED;
2665   }
2666 
2667   /// See AbstractAttribute::getAsStr()
2668   const std::string getAsStr() const override {
2669     return getAssumed() ? "undefined-behavior" : "no-ub";
2670   }
2671 
2672   /// Note: The correctness of this analysis depends on the fact that the
2673   /// following 2 sets will stop changing after some point.
2674   /// "Change" here means that their size changes.
2675   /// The size of each set is monotonically increasing
2676   /// (we only add items to them) and it is upper bounded by the number of
2677   /// instructions in the processed function (we can never save more
2678   /// elements in either set than this number). Hence, at some point,
2679   /// they will stop increasing.
2680   /// Consequently, at some point, both sets will have stopped
2681   /// changing, effectively making the analysis reach a fixpoint.
2682 
2683   /// Note: These 2 sets are disjoint and an instruction can be considered
2684   /// one of 3 things:
2685   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2686   ///    the KnownUBInsts set.
2687   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2688   ///    has a reason to assume it).
2689   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2690   ///    could not find a reason to assume or prove that it can cause UB,
2691   ///    hence it assumes it doesn't. We have a set for these instructions
2692   ///    so that we don't reprocess them in every update.
2693   ///    Note however that instructions in this set may cause UB.
2694 
2695 protected:
2696   /// A set of all live instructions _known_ to cause UB.
2697   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2698 
2699 private:
2700   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2701   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2702 
2703   // Should be called on updates in which if we're processing an instruction
2704   // \p I that depends on a value \p V, one of the following has to happen:
2705   // - If the value is assumed, then stop.
2706   // - If the value is known but undef, then consider it UB.
2707   // - Otherwise, do specific processing with the simplified value.
2708   // We return None in the first 2 cases to signify that an appropriate
2709   // action was taken and the caller should stop.
2710   // Otherwise, we return the simplified value that the caller should
2711   // use for specific processing.
2712   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2713                                          Instruction *I) {
2714     bool UsedAssumedInformation = false;
2715     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2716         IRPosition::value(*V), *this, UsedAssumedInformation);
2717     if (!UsedAssumedInformation) {
2718       // Don't depend on assumed values.
2719       if (!SimplifiedV.hasValue()) {
2720         // If it is known (which we tested above) but it doesn't have a value,
2721         // then we can assume `undef` and hence the instruction is UB.
2722         KnownUBInsts.insert(I);
2723         return llvm::None;
2724       }
2725       if (!SimplifiedV.getValue())
2726         return nullptr;
2727       V = *SimplifiedV;
2728     }
2729     if (isa<UndefValue>(V)) {
2730       KnownUBInsts.insert(I);
2731       return llvm::None;
2732     }
2733     return V;
2734   }
2735 };
2736 
2737 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2738   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2739       : AAUndefinedBehaviorImpl(IRP, A) {}
2740 
2741   /// See AbstractAttribute::trackStatistics()
2742   void trackStatistics() const override {
2743     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2744                "Number of instructions known to have UB");
2745     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2746         KnownUBInsts.size();
2747   }
2748 };
2749 
2750 /// ------------------------ Will-Return Attributes ----------------------------
2751 
2752 // Helper function that checks whether a function has any cycle which we don't
2753 // know if it is bounded or not.
2754 // Loops with maximum trip count are considered bounded, any other cycle not.
2755 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2756   ScalarEvolution *SE =
2757       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2758   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2759   // If either SCEV or LoopInfo is not available for the function then we assume
2760   // any cycle to be unbounded cycle.
2761   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2762   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2763   if (!SE || !LI) {
2764     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2765       if (SCCI.hasCycle())
2766         return true;
2767     return false;
2768   }
2769 
2770   // If there's irreducible control, the function may contain non-loop cycles.
2771   if (mayContainIrreducibleControl(F, LI))
2772     return true;
2773 
2774   // Any loop that does not have a max trip count is considered unbounded cycle.
2775   for (auto *L : LI->getLoopsInPreorder()) {
2776     if (!SE->getSmallConstantMaxTripCount(L))
2777       return true;
2778   }
2779   return false;
2780 }
2781 
2782 struct AAWillReturnImpl : public AAWillReturn {
2783   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2784       : AAWillReturn(IRP, A) {}
2785 
2786   /// See AbstractAttribute::initialize(...).
2787   void initialize(Attributor &A) override {
2788     AAWillReturn::initialize(A);
2789 
2790     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2791       indicateOptimisticFixpoint();
2792       return;
2793     }
2794   }
2795 
2796   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2797   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2798     // Check for `mustprogress` in the scope and the associated function which
2799     // might be different if this is a call site.
2800     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2801         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2802       return false;
2803 
2804     const auto &MemAA =
2805         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2806     if (!MemAA.isAssumedReadOnly())
2807       return false;
2808     if (KnownOnly && !MemAA.isKnownReadOnly())
2809       return false;
2810     if (!MemAA.isKnownReadOnly())
2811       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2812 
2813     return true;
2814   }
2815 
2816   /// See AbstractAttribute::updateImpl(...).
2817   ChangeStatus updateImpl(Attributor &A) override {
2818     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2819       return ChangeStatus::UNCHANGED;
2820 
2821     auto CheckForWillReturn = [&](Instruction &I) {
2822       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2823       const auto &WillReturnAA =
2824           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2825       if (WillReturnAA.isKnownWillReturn())
2826         return true;
2827       if (!WillReturnAA.isAssumedWillReturn())
2828         return false;
2829       const auto &NoRecurseAA =
2830           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2831       return NoRecurseAA.isAssumedNoRecurse();
2832     };
2833 
2834     bool UsedAssumedInformation = false;
2835     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2836                                            UsedAssumedInformation))
2837       return indicatePessimisticFixpoint();
2838 
2839     return ChangeStatus::UNCHANGED;
2840   }
2841 
2842   /// See AbstractAttribute::getAsStr()
2843   const std::string getAsStr() const override {
2844     return getAssumed() ? "willreturn" : "may-noreturn";
2845   }
2846 };
2847 
2848 struct AAWillReturnFunction final : AAWillReturnImpl {
2849   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2850       : AAWillReturnImpl(IRP, A) {}
2851 
2852   /// See AbstractAttribute::initialize(...).
2853   void initialize(Attributor &A) override {
2854     AAWillReturnImpl::initialize(A);
2855 
2856     Function *F = getAnchorScope();
2857     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2858       indicatePessimisticFixpoint();
2859   }
2860 
2861   /// See AbstractAttribute::trackStatistics()
2862   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2863 };
2864 
2865 /// WillReturn attribute deduction for a call sites.
2866 struct AAWillReturnCallSite final : AAWillReturnImpl {
2867   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2868       : AAWillReturnImpl(IRP, A) {}
2869 
2870   /// See AbstractAttribute::initialize(...).
2871   void initialize(Attributor &A) override {
2872     AAWillReturnImpl::initialize(A);
2873     Function *F = getAssociatedFunction();
2874     if (!F || !A.isFunctionIPOAmendable(*F))
2875       indicatePessimisticFixpoint();
2876   }
2877 
2878   /// See AbstractAttribute::updateImpl(...).
2879   ChangeStatus updateImpl(Attributor &A) override {
2880     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2881       return ChangeStatus::UNCHANGED;
2882 
2883     // TODO: Once we have call site specific value information we can provide
2884     //       call site specific liveness information and then it makes
2885     //       sense to specialize attributes for call sites arguments instead of
2886     //       redirecting requests to the callee argument.
2887     Function *F = getAssociatedFunction();
2888     const IRPosition &FnPos = IRPosition::function(*F);
2889     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2890     return clampStateAndIndicateChange(getState(), FnAA.getState());
2891   }
2892 
2893   /// See AbstractAttribute::trackStatistics()
2894   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2895 };
2896 
2897 /// -------------------AAReachability Attribute--------------------------
2898 
2899 struct AAReachabilityImpl : AAReachability {
2900   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2901       : AAReachability(IRP, A) {}
2902 
2903   const std::string getAsStr() const override {
2904     // TODO: Return the number of reachable queries.
2905     return "reachable";
2906   }
2907 
2908   /// See AbstractAttribute::updateImpl(...).
2909   ChangeStatus updateImpl(Attributor &A) override {
2910     return ChangeStatus::UNCHANGED;
2911   }
2912 };
2913 
2914 struct AAReachabilityFunction final : public AAReachabilityImpl {
2915   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2916       : AAReachabilityImpl(IRP, A) {}
2917 
2918   /// See AbstractAttribute::trackStatistics()
2919   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2920 };
2921 
2922 /// ------------------------ NoAlias Argument Attribute ------------------------
2923 
2924 struct AANoAliasImpl : AANoAlias {
2925   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2926     assert(getAssociatedType()->isPointerTy() &&
2927            "Noalias is a pointer attribute");
2928   }
2929 
2930   const std::string getAsStr() const override {
2931     return getAssumed() ? "noalias" : "may-alias";
2932   }
2933 };
2934 
2935 /// NoAlias attribute for a floating value.
2936 struct AANoAliasFloating final : AANoAliasImpl {
2937   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2938       : AANoAliasImpl(IRP, A) {}
2939 
2940   /// See AbstractAttribute::initialize(...).
2941   void initialize(Attributor &A) override {
2942     AANoAliasImpl::initialize(A);
2943     Value *Val = &getAssociatedValue();
2944     do {
2945       CastInst *CI = dyn_cast<CastInst>(Val);
2946       if (!CI)
2947         break;
2948       Value *Base = CI->getOperand(0);
2949       if (!Base->hasOneUse())
2950         break;
2951       Val = Base;
2952     } while (true);
2953 
2954     if (!Val->getType()->isPointerTy()) {
2955       indicatePessimisticFixpoint();
2956       return;
2957     }
2958 
2959     if (isa<AllocaInst>(Val))
2960       indicateOptimisticFixpoint();
2961     else if (isa<ConstantPointerNull>(Val) &&
2962              !NullPointerIsDefined(getAnchorScope(),
2963                                    Val->getType()->getPointerAddressSpace()))
2964       indicateOptimisticFixpoint();
2965     else if (Val != &getAssociatedValue()) {
2966       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2967           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2968       if (ValNoAliasAA.isKnownNoAlias())
2969         indicateOptimisticFixpoint();
2970     }
2971   }
2972 
2973   /// See AbstractAttribute::updateImpl(...).
2974   ChangeStatus updateImpl(Attributor &A) override {
2975     // TODO: Implement this.
2976     return indicatePessimisticFixpoint();
2977   }
2978 
2979   /// See AbstractAttribute::trackStatistics()
2980   void trackStatistics() const override {
2981     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2982   }
2983 };
2984 
2985 /// NoAlias attribute for an argument.
2986 struct AANoAliasArgument final
2987     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2988   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2989   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2990 
2991   /// See AbstractAttribute::initialize(...).
2992   void initialize(Attributor &A) override {
2993     Base::initialize(A);
2994     // See callsite argument attribute and callee argument attribute.
2995     if (hasAttr({Attribute::ByVal}))
2996       indicateOptimisticFixpoint();
2997   }
2998 
2999   /// See AbstractAttribute::update(...).
3000   ChangeStatus updateImpl(Attributor &A) override {
3001     // We have to make sure no-alias on the argument does not break
3002     // synchronization when this is a callback argument, see also [1] below.
3003     // If synchronization cannot be affected, we delegate to the base updateImpl
3004     // function, otherwise we give up for now.
3005 
3006     // If the function is no-sync, no-alias cannot break synchronization.
3007     const auto &NoSyncAA =
3008         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
3009                              DepClassTy::OPTIONAL);
3010     if (NoSyncAA.isAssumedNoSync())
3011       return Base::updateImpl(A);
3012 
3013     // If the argument is read-only, no-alias cannot break synchronization.
3014     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3015         *this, getIRPosition(), DepClassTy::OPTIONAL);
3016     if (MemBehaviorAA.isAssumedReadOnly())
3017       return Base::updateImpl(A);
3018 
3019     // If the argument is never passed through callbacks, no-alias cannot break
3020     // synchronization.
3021     bool AllCallSitesKnown;
3022     if (A.checkForAllCallSites(
3023             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3024             true, AllCallSitesKnown))
3025       return Base::updateImpl(A);
3026 
3027     // TODO: add no-alias but make sure it doesn't break synchronization by
3028     // introducing fake uses. See:
3029     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3030     //     International Workshop on OpenMP 2018,
3031     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3032 
3033     return indicatePessimisticFixpoint();
3034   }
3035 
3036   /// See AbstractAttribute::trackStatistics()
3037   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3038 };
3039 
3040 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3041   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3042       : AANoAliasImpl(IRP, A) {}
3043 
3044   /// See AbstractAttribute::initialize(...).
3045   void initialize(Attributor &A) override {
3046     // See callsite argument attribute and callee argument attribute.
3047     const auto &CB = cast<CallBase>(getAnchorValue());
3048     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3049       indicateOptimisticFixpoint();
3050     Value &Val = getAssociatedValue();
3051     if (isa<ConstantPointerNull>(Val) &&
3052         !NullPointerIsDefined(getAnchorScope(),
3053                               Val.getType()->getPointerAddressSpace()))
3054       indicateOptimisticFixpoint();
3055   }
3056 
3057   /// Determine if the underlying value may alias with the call site argument
3058   /// \p OtherArgNo of \p ICS (= the underlying call site).
3059   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3060                             const AAMemoryBehavior &MemBehaviorAA,
3061                             const CallBase &CB, unsigned OtherArgNo) {
3062     // We do not need to worry about aliasing with the underlying IRP.
3063     if (this->getCalleeArgNo() == (int)OtherArgNo)
3064       return false;
3065 
3066     // If it is not a pointer or pointer vector we do not alias.
3067     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3068     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3069       return false;
3070 
3071     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3072         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3073 
3074     // If the argument is readnone, there is no read-write aliasing.
3075     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3076       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3077       return false;
3078     }
3079 
3080     // If the argument is readonly and the underlying value is readonly, there
3081     // is no read-write aliasing.
3082     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3083     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3084       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3085       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3086       return false;
3087     }
3088 
3089     // We have to utilize actual alias analysis queries so we need the object.
3090     if (!AAR)
3091       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3092 
3093     // Try to rule it out at the call site.
3094     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3095     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3096                          "callsite arguments: "
3097                       << getAssociatedValue() << " " << *ArgOp << " => "
3098                       << (IsAliasing ? "" : "no-") << "alias \n");
3099 
3100     return IsAliasing;
3101   }
3102 
3103   bool
3104   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3105                                          const AAMemoryBehavior &MemBehaviorAA,
3106                                          const AANoAlias &NoAliasAA) {
3107     // We can deduce "noalias" if the following conditions hold.
3108     // (i)   Associated value is assumed to be noalias in the definition.
3109     // (ii)  Associated value is assumed to be no-capture in all the uses
3110     //       possibly executed before this callsite.
3111     // (iii) There is no other pointer argument which could alias with the
3112     //       value.
3113 
3114     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3115     if (!AssociatedValueIsNoAliasAtDef) {
3116       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3117                         << " is not no-alias at the definition\n");
3118       return false;
3119     }
3120 
3121     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3122 
3123     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3124     const Function *ScopeFn = VIRP.getAnchorScope();
3125     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3126     // Check whether the value is captured in the scope using AANoCapture.
3127     //      Look at CFG and check only uses possibly executed before this
3128     //      callsite.
3129     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3130       Instruction *UserI = cast<Instruction>(U.getUser());
3131 
3132       // If UserI is the curr instruction and there is a single potential use of
3133       // the value in UserI we allow the use.
3134       // TODO: We should inspect the operands and allow those that cannot alias
3135       //       with the value.
3136       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3137         return true;
3138 
3139       if (ScopeFn) {
3140         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3141             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3142 
3143         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3144           return true;
3145 
3146         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3147           if (CB->isArgOperand(&U)) {
3148 
3149             unsigned ArgNo = CB->getArgOperandNo(&U);
3150 
3151             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3152                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3153                 DepClassTy::OPTIONAL);
3154 
3155             if (NoCaptureAA.isAssumedNoCapture())
3156               return true;
3157           }
3158         }
3159       }
3160 
3161       // For cases which can potentially have more users
3162       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3163           isa<SelectInst>(U)) {
3164         Follow = true;
3165         return true;
3166       }
3167 
3168       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3169       return false;
3170     };
3171 
3172     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3173       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3174         LLVM_DEBUG(
3175             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3176                    << " cannot be noalias as it is potentially captured\n");
3177         return false;
3178       }
3179     }
3180     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3181 
3182     // Check there is no other pointer argument which could alias with the
3183     // value passed at this call site.
3184     // TODO: AbstractCallSite
3185     const auto &CB = cast<CallBase>(getAnchorValue());
3186     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
3187          OtherArgNo++)
3188       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3189         return false;
3190 
3191     return true;
3192   }
3193 
3194   /// See AbstractAttribute::updateImpl(...).
3195   ChangeStatus updateImpl(Attributor &A) override {
3196     // If the argument is readnone we are done as there are no accesses via the
3197     // argument.
3198     auto &MemBehaviorAA =
3199         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3200     if (MemBehaviorAA.isAssumedReadNone()) {
3201       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3202       return ChangeStatus::UNCHANGED;
3203     }
3204 
3205     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3206     const auto &NoAliasAA =
3207         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3208 
3209     AAResults *AAR = nullptr;
3210     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3211                                                NoAliasAA)) {
3212       LLVM_DEBUG(
3213           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3214       return ChangeStatus::UNCHANGED;
3215     }
3216 
3217     return indicatePessimisticFixpoint();
3218   }
3219 
3220   /// See AbstractAttribute::trackStatistics()
3221   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3222 };
3223 
3224 /// NoAlias attribute for function return value.
3225 struct AANoAliasReturned final : AANoAliasImpl {
3226   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3227       : AANoAliasImpl(IRP, A) {}
3228 
3229   /// See AbstractAttribute::initialize(...).
3230   void initialize(Attributor &A) override {
3231     AANoAliasImpl::initialize(A);
3232     Function *F = getAssociatedFunction();
3233     if (!F || F->isDeclaration())
3234       indicatePessimisticFixpoint();
3235   }
3236 
3237   /// See AbstractAttribute::updateImpl(...).
3238   virtual ChangeStatus updateImpl(Attributor &A) override {
3239 
3240     auto CheckReturnValue = [&](Value &RV) -> bool {
3241       if (Constant *C = dyn_cast<Constant>(&RV))
3242         if (C->isNullValue() || isa<UndefValue>(C))
3243           return true;
3244 
3245       /// For now, we can only deduce noalias if we have call sites.
3246       /// FIXME: add more support.
3247       if (!isa<CallBase>(&RV))
3248         return false;
3249 
3250       const IRPosition &RVPos = IRPosition::value(RV);
3251       const auto &NoAliasAA =
3252           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3253       if (!NoAliasAA.isAssumedNoAlias())
3254         return false;
3255 
3256       const auto &NoCaptureAA =
3257           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3258       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3259     };
3260 
3261     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3262       return indicatePessimisticFixpoint();
3263 
3264     return ChangeStatus::UNCHANGED;
3265   }
3266 
3267   /// See AbstractAttribute::trackStatistics()
3268   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3269 };
3270 
3271 /// NoAlias attribute deduction for a call site return value.
3272 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3273   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3274       : AANoAliasImpl(IRP, A) {}
3275 
3276   /// See AbstractAttribute::initialize(...).
3277   void initialize(Attributor &A) override {
3278     AANoAliasImpl::initialize(A);
3279     Function *F = getAssociatedFunction();
3280     if (!F || F->isDeclaration())
3281       indicatePessimisticFixpoint();
3282   }
3283 
3284   /// See AbstractAttribute::updateImpl(...).
3285   ChangeStatus updateImpl(Attributor &A) override {
3286     // TODO: Once we have call site specific value information we can provide
3287     //       call site specific liveness information and then it makes
3288     //       sense to specialize attributes for call sites arguments instead of
3289     //       redirecting requests to the callee argument.
3290     Function *F = getAssociatedFunction();
3291     const IRPosition &FnPos = IRPosition::returned(*F);
3292     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3293     return clampStateAndIndicateChange(getState(), FnAA.getState());
3294   }
3295 
3296   /// See AbstractAttribute::trackStatistics()
3297   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3298 };
3299 
3300 /// -------------------AAIsDead Function Attribute-----------------------
3301 
3302 struct AAIsDeadValueImpl : public AAIsDead {
3303   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3304 
3305   /// See AAIsDead::isAssumedDead().
3306   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3307 
3308   /// See AAIsDead::isKnownDead().
3309   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3310 
3311   /// See AAIsDead::isAssumedDead(BasicBlock *).
3312   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3313 
3314   /// See AAIsDead::isKnownDead(BasicBlock *).
3315   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3316 
3317   /// See AAIsDead::isAssumedDead(Instruction *I).
3318   bool isAssumedDead(const Instruction *I) const override {
3319     return I == getCtxI() && isAssumedDead();
3320   }
3321 
3322   /// See AAIsDead::isKnownDead(Instruction *I).
3323   bool isKnownDead(const Instruction *I) const override {
3324     return isAssumedDead(I) && isKnownDead();
3325   }
3326 
3327   /// See AbstractAttribute::getAsStr().
3328   const std::string getAsStr() const override {
3329     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3330   }
3331 
3332   /// Check if all uses are assumed dead.
3333   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3334     // Callers might not check the type, void has no uses.
3335     if (V.getType()->isVoidTy())
3336       return true;
3337 
3338     // If we replace a value with a constant there are no uses left afterwards.
3339     if (!isa<Constant>(V)) {
3340       bool UsedAssumedInformation = false;
3341       Optional<Constant *> C =
3342           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3343       if (!C.hasValue() || *C)
3344         return true;
3345     }
3346 
3347     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3348     // Explicitly set the dependence class to required because we want a long
3349     // chain of N dependent instructions to be considered live as soon as one is
3350     // without going through N update cycles. This is not required for
3351     // correctness.
3352     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3353                              DepClassTy::REQUIRED);
3354   }
3355 
3356   /// Determine if \p I is assumed to be side-effect free.
3357   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3358     if (!I || wouldInstructionBeTriviallyDead(I))
3359       return true;
3360 
3361     auto *CB = dyn_cast<CallBase>(I);
3362     if (!CB || isa<IntrinsicInst>(CB))
3363       return false;
3364 
3365     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3366     const auto &NoUnwindAA =
3367         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3368     if (!NoUnwindAA.isAssumedNoUnwind())
3369       return false;
3370     if (!NoUnwindAA.isKnownNoUnwind())
3371       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3372 
3373     const auto &MemBehaviorAA =
3374         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3375     if (MemBehaviorAA.isAssumedReadOnly()) {
3376       if (!MemBehaviorAA.isKnownReadOnly())
3377         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3378       return true;
3379     }
3380     return false;
3381   }
3382 };
3383 
3384 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3385   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3386       : AAIsDeadValueImpl(IRP, A) {}
3387 
3388   /// See AbstractAttribute::initialize(...).
3389   void initialize(Attributor &A) override {
3390     if (isa<UndefValue>(getAssociatedValue())) {
3391       indicatePessimisticFixpoint();
3392       return;
3393     }
3394 
3395     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3396     if (!isAssumedSideEffectFree(A, I)) {
3397       if (!isa_and_nonnull<StoreInst>(I))
3398         indicatePessimisticFixpoint();
3399       else
3400         removeAssumedBits(HAS_NO_EFFECT);
3401     }
3402   }
3403 
3404   bool isDeadStore(Attributor &A, StoreInst &SI) {
3405     // Lang ref now states volatile store is not UB/dead, let's skip them.
3406     if (SI.isVolatile())
3407       return false;
3408 
3409     bool UsedAssumedInformation = false;
3410     SmallSetVector<Value *, 4> PotentialCopies;
3411     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3412                                              UsedAssumedInformation))
3413       return false;
3414     return llvm::all_of(PotentialCopies, [&](Value *V) {
3415       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3416                              UsedAssumedInformation);
3417     });
3418   }
3419 
3420   /// See AbstractAttribute::updateImpl(...).
3421   ChangeStatus updateImpl(Attributor &A) override {
3422     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3423     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3424       if (!isDeadStore(A, *SI))
3425         return indicatePessimisticFixpoint();
3426     } else {
3427       if (!isAssumedSideEffectFree(A, I))
3428         return indicatePessimisticFixpoint();
3429       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3430         return indicatePessimisticFixpoint();
3431     }
3432     return ChangeStatus::UNCHANGED;
3433   }
3434 
3435   /// See AbstractAttribute::manifest(...).
3436   ChangeStatus manifest(Attributor &A) override {
3437     Value &V = getAssociatedValue();
3438     if (auto *I = dyn_cast<Instruction>(&V)) {
3439       // If we get here we basically know the users are all dead. We check if
3440       // isAssumedSideEffectFree returns true here again because it might not be
3441       // the case and only the users are dead but the instruction (=call) is
3442       // still needed.
3443       if (isa<StoreInst>(I) ||
3444           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3445         A.deleteAfterManifest(*I);
3446         return ChangeStatus::CHANGED;
3447       }
3448     }
3449     if (V.use_empty())
3450       return ChangeStatus::UNCHANGED;
3451 
3452     bool UsedAssumedInformation = false;
3453     Optional<Constant *> C =
3454         A.getAssumedConstant(V, *this, UsedAssumedInformation);
3455     if (C.hasValue() && C.getValue())
3456       return ChangeStatus::UNCHANGED;
3457 
3458     // Replace the value with undef as it is dead but keep droppable uses around
3459     // as they provide information we don't want to give up on just yet.
3460     UndefValue &UV = *UndefValue::get(V.getType());
3461     bool AnyChange =
3462         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
3463     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3464   }
3465 
3466   /// See AbstractAttribute::trackStatistics()
3467   void trackStatistics() const override {
3468     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3469   }
3470 };
3471 
3472 struct AAIsDeadArgument : public AAIsDeadFloating {
3473   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3474       : AAIsDeadFloating(IRP, A) {}
3475 
3476   /// See AbstractAttribute::initialize(...).
3477   void initialize(Attributor &A) override {
3478     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3479       indicatePessimisticFixpoint();
3480   }
3481 
3482   /// See AbstractAttribute::manifest(...).
3483   ChangeStatus manifest(Attributor &A) override {
3484     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3485     Argument &Arg = *getAssociatedArgument();
3486     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3487       if (A.registerFunctionSignatureRewrite(
3488               Arg, /* ReplacementTypes */ {},
3489               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3490               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3491         Arg.dropDroppableUses();
3492         return ChangeStatus::CHANGED;
3493       }
3494     return Changed;
3495   }
3496 
3497   /// See AbstractAttribute::trackStatistics()
3498   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3499 };
3500 
3501 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3502   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3503       : AAIsDeadValueImpl(IRP, A) {}
3504 
3505   /// See AbstractAttribute::initialize(...).
3506   void initialize(Attributor &A) override {
3507     if (isa<UndefValue>(getAssociatedValue()))
3508       indicatePessimisticFixpoint();
3509   }
3510 
3511   /// See AbstractAttribute::updateImpl(...).
3512   ChangeStatus updateImpl(Attributor &A) override {
3513     // TODO: Once we have call site specific value information we can provide
3514     //       call site specific liveness information and then it makes
3515     //       sense to specialize attributes for call sites arguments instead of
3516     //       redirecting requests to the callee argument.
3517     Argument *Arg = getAssociatedArgument();
3518     if (!Arg)
3519       return indicatePessimisticFixpoint();
3520     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3521     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3522     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3523   }
3524 
3525   /// See AbstractAttribute::manifest(...).
3526   ChangeStatus manifest(Attributor &A) override {
3527     CallBase &CB = cast<CallBase>(getAnchorValue());
3528     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3529     assert(!isa<UndefValue>(U.get()) &&
3530            "Expected undef values to be filtered out!");
3531     UndefValue &UV = *UndefValue::get(U->getType());
3532     if (A.changeUseAfterManifest(U, UV))
3533       return ChangeStatus::CHANGED;
3534     return ChangeStatus::UNCHANGED;
3535   }
3536 
3537   /// See AbstractAttribute::trackStatistics()
3538   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3539 };
3540 
3541 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3542   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3543       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3544 
3545   /// See AAIsDead::isAssumedDead().
3546   bool isAssumedDead() const override {
3547     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3548   }
3549 
3550   /// See AbstractAttribute::initialize(...).
3551   void initialize(Attributor &A) override {
3552     if (isa<UndefValue>(getAssociatedValue())) {
3553       indicatePessimisticFixpoint();
3554       return;
3555     }
3556 
3557     // We track this separately as a secondary state.
3558     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3559   }
3560 
3561   /// See AbstractAttribute::updateImpl(...).
3562   ChangeStatus updateImpl(Attributor &A) override {
3563     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3564     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3565       IsAssumedSideEffectFree = false;
3566       Changed = ChangeStatus::CHANGED;
3567     }
3568     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3569       return indicatePessimisticFixpoint();
3570     return Changed;
3571   }
3572 
3573   /// See AbstractAttribute::trackStatistics()
3574   void trackStatistics() const override {
3575     if (IsAssumedSideEffectFree)
3576       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3577     else
3578       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3579   }
3580 
3581   /// See AbstractAttribute::getAsStr().
3582   const std::string getAsStr() const override {
3583     return isAssumedDead()
3584                ? "assumed-dead"
3585                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3586   }
3587 
3588 private:
3589   bool IsAssumedSideEffectFree;
3590 };
3591 
3592 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3593   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3594       : AAIsDeadValueImpl(IRP, A) {}
3595 
3596   /// See AbstractAttribute::updateImpl(...).
3597   ChangeStatus updateImpl(Attributor &A) override {
3598 
3599     bool UsedAssumedInformation = false;
3600     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3601                               {Instruction::Ret}, UsedAssumedInformation);
3602 
3603     auto PredForCallSite = [&](AbstractCallSite ACS) {
3604       if (ACS.isCallbackCall() || !ACS.getInstruction())
3605         return false;
3606       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3607     };
3608 
3609     bool AllCallSitesKnown;
3610     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3611                                 AllCallSitesKnown))
3612       return indicatePessimisticFixpoint();
3613 
3614     return ChangeStatus::UNCHANGED;
3615   }
3616 
3617   /// See AbstractAttribute::manifest(...).
3618   ChangeStatus manifest(Attributor &A) override {
3619     // TODO: Rewrite the signature to return void?
3620     bool AnyChange = false;
3621     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3622     auto RetInstPred = [&](Instruction &I) {
3623       ReturnInst &RI = cast<ReturnInst>(I);
3624       if (!isa<UndefValue>(RI.getReturnValue()))
3625         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3626       return true;
3627     };
3628     bool UsedAssumedInformation = false;
3629     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3630                               UsedAssumedInformation);
3631     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3632   }
3633 
3634   /// See AbstractAttribute::trackStatistics()
3635   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3636 };
3637 
3638 struct AAIsDeadFunction : public AAIsDead {
3639   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3640 
3641   /// See AbstractAttribute::initialize(...).
3642   void initialize(Attributor &A) override {
3643     const Function *F = getAnchorScope();
3644     if (F && !F->isDeclaration()) {
3645       // We only want to compute liveness once. If the function is not part of
3646       // the SCC, skip it.
3647       if (A.isRunOn(*const_cast<Function *>(F))) {
3648         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3649         assumeLive(A, F->getEntryBlock());
3650       } else {
3651         indicatePessimisticFixpoint();
3652       }
3653     }
3654   }
3655 
3656   /// See AbstractAttribute::getAsStr().
3657   const std::string getAsStr() const override {
3658     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3659            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3660            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3661            std::to_string(KnownDeadEnds.size()) + "]";
3662   }
3663 
3664   /// See AbstractAttribute::manifest(...).
3665   ChangeStatus manifest(Attributor &A) override {
3666     assert(getState().isValidState() &&
3667            "Attempted to manifest an invalid state!");
3668 
3669     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3670     Function &F = *getAnchorScope();
3671 
3672     if (AssumedLiveBlocks.empty()) {
3673       A.deleteAfterManifest(F);
3674       return ChangeStatus::CHANGED;
3675     }
3676 
3677     // Flag to determine if we can change an invoke to a call assuming the
3678     // callee is nounwind. This is not possible if the personality of the
3679     // function allows to catch asynchronous exceptions.
3680     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3681 
3682     KnownDeadEnds.set_union(ToBeExploredFrom);
3683     for (const Instruction *DeadEndI : KnownDeadEnds) {
3684       auto *CB = dyn_cast<CallBase>(DeadEndI);
3685       if (!CB)
3686         continue;
3687       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3688           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3689       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3690       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3691         continue;
3692 
3693       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3694         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3695       else
3696         A.changeToUnreachableAfterManifest(
3697             const_cast<Instruction *>(DeadEndI->getNextNode()));
3698       HasChanged = ChangeStatus::CHANGED;
3699     }
3700 
3701     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3702     for (BasicBlock &BB : F)
3703       if (!AssumedLiveBlocks.count(&BB)) {
3704         A.deleteAfterManifest(BB);
3705         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3706       }
3707 
3708     return HasChanged;
3709   }
3710 
3711   /// See AbstractAttribute::updateImpl(...).
3712   ChangeStatus updateImpl(Attributor &A) override;
3713 
3714   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3715     return !AssumedLiveEdges.count(std::make_pair(From, To));
3716   }
3717 
3718   /// See AbstractAttribute::trackStatistics()
3719   void trackStatistics() const override {}
3720 
3721   /// Returns true if the function is assumed dead.
3722   bool isAssumedDead() const override { return false; }
3723 
3724   /// See AAIsDead::isKnownDead().
3725   bool isKnownDead() const override { return false; }
3726 
3727   /// See AAIsDead::isAssumedDead(BasicBlock *).
3728   bool isAssumedDead(const BasicBlock *BB) const override {
3729     assert(BB->getParent() == getAnchorScope() &&
3730            "BB must be in the same anchor scope function.");
3731 
3732     if (!getAssumed())
3733       return false;
3734     return !AssumedLiveBlocks.count(BB);
3735   }
3736 
3737   /// See AAIsDead::isKnownDead(BasicBlock *).
3738   bool isKnownDead(const BasicBlock *BB) const override {
3739     return getKnown() && isAssumedDead(BB);
3740   }
3741 
3742   /// See AAIsDead::isAssumed(Instruction *I).
3743   bool isAssumedDead(const Instruction *I) const override {
3744     assert(I->getParent()->getParent() == getAnchorScope() &&
3745            "Instruction must be in the same anchor scope function.");
3746 
3747     if (!getAssumed())
3748       return false;
3749 
3750     // If it is not in AssumedLiveBlocks then it for sure dead.
3751     // Otherwise, it can still be after noreturn call in a live block.
3752     if (!AssumedLiveBlocks.count(I->getParent()))
3753       return true;
3754 
3755     // If it is not after a liveness barrier it is live.
3756     const Instruction *PrevI = I->getPrevNode();
3757     while (PrevI) {
3758       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3759         return true;
3760       PrevI = PrevI->getPrevNode();
3761     }
3762     return false;
3763   }
3764 
3765   /// See AAIsDead::isKnownDead(Instruction *I).
3766   bool isKnownDead(const Instruction *I) const override {
3767     return getKnown() && isAssumedDead(I);
3768   }
3769 
3770   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3771   /// that internal function called from \p BB should now be looked at.
3772   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3773     if (!AssumedLiveBlocks.insert(&BB).second)
3774       return false;
3775 
3776     // We assume that all of BB is (probably) live now and if there are calls to
3777     // internal functions we will assume that those are now live as well. This
3778     // is a performance optimization for blocks with calls to a lot of internal
3779     // functions. It can however cause dead functions to be treated as live.
3780     for (const Instruction &I : BB)
3781       if (const auto *CB = dyn_cast<CallBase>(&I))
3782         if (const Function *F = CB->getCalledFunction())
3783           if (F->hasLocalLinkage())
3784             A.markLiveInternalFunction(*F);
3785     return true;
3786   }
3787 
3788   /// Collection of instructions that need to be explored again, e.g., we
3789   /// did assume they do not transfer control to (one of their) successors.
3790   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3791 
3792   /// Collection of instructions that are known to not transfer control.
3793   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3794 
3795   /// Collection of all assumed live edges
3796   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3797 
3798   /// Collection of all assumed live BasicBlocks.
3799   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3800 };
3801 
3802 static bool
3803 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3804                         AbstractAttribute &AA,
3805                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3806   const IRPosition &IPos = IRPosition::callsite_function(CB);
3807 
3808   const auto &NoReturnAA =
3809       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3810   if (NoReturnAA.isAssumedNoReturn())
3811     return !NoReturnAA.isKnownNoReturn();
3812   if (CB.isTerminator())
3813     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3814   else
3815     AliveSuccessors.push_back(CB.getNextNode());
3816   return false;
3817 }
3818 
3819 static bool
3820 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3821                         AbstractAttribute &AA,
3822                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3823   bool UsedAssumedInformation =
3824       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3825 
3826   // First, determine if we can change an invoke to a call assuming the
3827   // callee is nounwind. This is not possible if the personality of the
3828   // function allows to catch asynchronous exceptions.
3829   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3830     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3831   } else {
3832     const IRPosition &IPos = IRPosition::callsite_function(II);
3833     const auto &AANoUnw =
3834         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3835     if (AANoUnw.isAssumedNoUnwind()) {
3836       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3837     } else {
3838       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3839     }
3840   }
3841   return UsedAssumedInformation;
3842 }
3843 
3844 static bool
3845 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3846                         AbstractAttribute &AA,
3847                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3848   bool UsedAssumedInformation = false;
3849   if (BI.getNumSuccessors() == 1) {
3850     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3851   } else {
3852     Optional<Constant *> C =
3853         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3854     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3855       // No value yet, assume both edges are dead.
3856     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3857       const BasicBlock *SuccBB =
3858           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3859       AliveSuccessors.push_back(&SuccBB->front());
3860     } else {
3861       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3862       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3863       UsedAssumedInformation = false;
3864     }
3865   }
3866   return UsedAssumedInformation;
3867 }
3868 
3869 static bool
3870 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3871                         AbstractAttribute &AA,
3872                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3873   bool UsedAssumedInformation = false;
3874   Optional<Constant *> C =
3875       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3876   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3877     // No value yet, assume all edges are dead.
3878   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3879     for (auto &CaseIt : SI.cases()) {
3880       if (CaseIt.getCaseValue() == C.getValue()) {
3881         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3882         return UsedAssumedInformation;
3883       }
3884     }
3885     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3886     return UsedAssumedInformation;
3887   } else {
3888     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3889       AliveSuccessors.push_back(&SuccBB->front());
3890   }
3891   return UsedAssumedInformation;
3892 }
3893 
3894 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3895   ChangeStatus Change = ChangeStatus::UNCHANGED;
3896 
3897   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3898                     << getAnchorScope()->size() << "] BBs and "
3899                     << ToBeExploredFrom.size() << " exploration points and "
3900                     << KnownDeadEnds.size() << " known dead ends\n");
3901 
3902   // Copy and clear the list of instructions we need to explore from. It is
3903   // refilled with instructions the next update has to look at.
3904   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3905                                                ToBeExploredFrom.end());
3906   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3907 
3908   SmallVector<const Instruction *, 8> AliveSuccessors;
3909   while (!Worklist.empty()) {
3910     const Instruction *I = Worklist.pop_back_val();
3911     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3912 
3913     // Fast forward for uninteresting instructions. We could look for UB here
3914     // though.
3915     while (!I->isTerminator() && !isa<CallBase>(I))
3916       I = I->getNextNode();
3917 
3918     AliveSuccessors.clear();
3919 
3920     bool UsedAssumedInformation = false;
3921     switch (I->getOpcode()) {
3922     // TODO: look for (assumed) UB to backwards propagate "deadness".
3923     default:
3924       assert(I->isTerminator() &&
3925              "Expected non-terminators to be handled already!");
3926       for (const BasicBlock *SuccBB : successors(I->getParent()))
3927         AliveSuccessors.push_back(&SuccBB->front());
3928       break;
3929     case Instruction::Call:
3930       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3931                                                        *this, AliveSuccessors);
3932       break;
3933     case Instruction::Invoke:
3934       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3935                                                        *this, AliveSuccessors);
3936       break;
3937     case Instruction::Br:
3938       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3939                                                        *this, AliveSuccessors);
3940       break;
3941     case Instruction::Switch:
3942       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3943                                                        *this, AliveSuccessors);
3944       break;
3945     }
3946 
3947     if (UsedAssumedInformation) {
3948       NewToBeExploredFrom.insert(I);
3949     } else if (AliveSuccessors.empty() ||
3950                (I->isTerminator() &&
3951                 AliveSuccessors.size() < I->getNumSuccessors())) {
3952       if (KnownDeadEnds.insert(I))
3953         Change = ChangeStatus::CHANGED;
3954     }
3955 
3956     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3957                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3958                       << UsedAssumedInformation << "\n");
3959 
3960     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3961       if (!I->isTerminator()) {
3962         assert(AliveSuccessors.size() == 1 &&
3963                "Non-terminator expected to have a single successor!");
3964         Worklist.push_back(AliveSuccessor);
3965       } else {
3966         // record the assumed live edge
3967         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
3968         if (AssumedLiveEdges.insert(Edge).second)
3969           Change = ChangeStatus::CHANGED;
3970         if (assumeLive(A, *AliveSuccessor->getParent()))
3971           Worklist.push_back(AliveSuccessor);
3972       }
3973     }
3974   }
3975 
3976   // Check if the content of ToBeExploredFrom changed, ignore the order.
3977   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
3978       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
3979         return !ToBeExploredFrom.count(I);
3980       })) {
3981     Change = ChangeStatus::CHANGED;
3982     ToBeExploredFrom = std::move(NewToBeExploredFrom);
3983   }
3984 
3985   // If we know everything is live there is no need to query for liveness.
3986   // Instead, indicating a pessimistic fixpoint will cause the state to be
3987   // "invalid" and all queries to be answered conservatively without lookups.
3988   // To be in this state we have to (1) finished the exploration and (3) not
3989   // discovered any non-trivial dead end and (2) not ruled unreachable code
3990   // dead.
3991   if (ToBeExploredFrom.empty() &&
3992       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3993       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3994         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3995       }))
3996     return indicatePessimisticFixpoint();
3997   return Change;
3998 }
3999 
4000 /// Liveness information for a call sites.
4001 struct AAIsDeadCallSite final : AAIsDeadFunction {
4002   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
4003       : AAIsDeadFunction(IRP, A) {}
4004 
4005   /// See AbstractAttribute::initialize(...).
4006   void initialize(Attributor &A) override {
4007     // TODO: Once we have call site specific value information we can provide
4008     //       call site specific liveness information and then it makes
4009     //       sense to specialize attributes for call sites instead of
4010     //       redirecting requests to the callee.
4011     llvm_unreachable("Abstract attributes for liveness are not "
4012                      "supported for call sites yet!");
4013   }
4014 
4015   /// See AbstractAttribute::updateImpl(...).
4016   ChangeStatus updateImpl(Attributor &A) override {
4017     return indicatePessimisticFixpoint();
4018   }
4019 
4020   /// See AbstractAttribute::trackStatistics()
4021   void trackStatistics() const override {}
4022 };
4023 
4024 /// -------------------- Dereferenceable Argument Attribute --------------------
4025 
4026 struct AADereferenceableImpl : AADereferenceable {
4027   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4028       : AADereferenceable(IRP, A) {}
4029   using StateType = DerefState;
4030 
4031   /// See AbstractAttribute::initialize(...).
4032   void initialize(Attributor &A) override {
4033     SmallVector<Attribute, 4> Attrs;
4034     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4035              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4036     for (const Attribute &Attr : Attrs)
4037       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4038 
4039     const IRPosition &IRP = this->getIRPosition();
4040     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4041 
4042     bool CanBeNull, CanBeFreed;
4043     takeKnownDerefBytesMaximum(
4044         IRP.getAssociatedValue().getPointerDereferenceableBytes(
4045             A.getDataLayout(), CanBeNull, CanBeFreed));
4046 
4047     bool IsFnInterface = IRP.isFnInterfaceKind();
4048     Function *FnScope = IRP.getAnchorScope();
4049     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4050       indicatePessimisticFixpoint();
4051       return;
4052     }
4053 
4054     if (Instruction *CtxI = getCtxI())
4055       followUsesInMBEC(*this, A, getState(), *CtxI);
4056   }
4057 
4058   /// See AbstractAttribute::getState()
4059   /// {
4060   StateType &getState() override { return *this; }
4061   const StateType &getState() const override { return *this; }
4062   /// }
4063 
4064   /// Helper function for collecting accessed bytes in must-be-executed-context
4065   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4066                               DerefState &State) {
4067     const Value *UseV = U->get();
4068     if (!UseV->getType()->isPointerTy())
4069       return;
4070 
4071     Type *PtrTy = UseV->getType();
4072     const DataLayout &DL = A.getDataLayout();
4073     int64_t Offset;
4074     if (const Value *Base = getBasePointerOfAccessPointerOperand(
4075             I, Offset, DL, /*AllowNonInbounds*/ true)) {
4076       if (Base == &getAssociatedValue() &&
4077           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
4078         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
4079         State.addAccessedBytes(Offset, Size);
4080       }
4081     }
4082   }
4083 
4084   /// See followUsesInMBEC
4085   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4086                        AADereferenceable::StateType &State) {
4087     bool IsNonNull = false;
4088     bool TrackUse = false;
4089     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4090         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4091     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4092                       << " for instruction " << *I << "\n");
4093 
4094     addAccessedBytesForUse(A, U, I, State);
4095     State.takeKnownDerefBytesMaximum(DerefBytes);
4096     return TrackUse;
4097   }
4098 
4099   /// See AbstractAttribute::manifest(...).
4100   ChangeStatus manifest(Attributor &A) override {
4101     ChangeStatus Change = AADereferenceable::manifest(A);
4102     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4103       removeAttrs({Attribute::DereferenceableOrNull});
4104       return ChangeStatus::CHANGED;
4105     }
4106     return Change;
4107   }
4108 
4109   void getDeducedAttributes(LLVMContext &Ctx,
4110                             SmallVectorImpl<Attribute> &Attrs) const override {
4111     // TODO: Add *_globally support
4112     if (isAssumedNonNull())
4113       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4114           Ctx, getAssumedDereferenceableBytes()));
4115     else
4116       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4117           Ctx, getAssumedDereferenceableBytes()));
4118   }
4119 
4120   /// See AbstractAttribute::getAsStr().
4121   const std::string getAsStr() const override {
4122     if (!getAssumedDereferenceableBytes())
4123       return "unknown-dereferenceable";
4124     return std::string("dereferenceable") +
4125            (isAssumedNonNull() ? "" : "_or_null") +
4126            (isAssumedGlobal() ? "_globally" : "") + "<" +
4127            std::to_string(getKnownDereferenceableBytes()) + "-" +
4128            std::to_string(getAssumedDereferenceableBytes()) + ">";
4129   }
4130 };
4131 
4132 /// Dereferenceable attribute for a floating value.
4133 struct AADereferenceableFloating : AADereferenceableImpl {
4134   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4135       : AADereferenceableImpl(IRP, A) {}
4136 
4137   /// See AbstractAttribute::updateImpl(...).
4138   ChangeStatus updateImpl(Attributor &A) override {
4139     const DataLayout &DL = A.getDataLayout();
4140 
4141     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4142                             bool Stripped) -> bool {
4143       unsigned IdxWidth =
4144           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4145       APInt Offset(IdxWidth, 0);
4146       const Value *Base =
4147           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4148 
4149       const auto &AA = A.getAAFor<AADereferenceable>(
4150           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4151       int64_t DerefBytes = 0;
4152       if (!Stripped && this == &AA) {
4153         // Use IR information if we did not strip anything.
4154         // TODO: track globally.
4155         bool CanBeNull, CanBeFreed;
4156         DerefBytes =
4157             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4158         T.GlobalState.indicatePessimisticFixpoint();
4159       } else {
4160         const DerefState &DS = AA.getState();
4161         DerefBytes = DS.DerefBytesState.getAssumed();
4162         T.GlobalState &= DS.GlobalState;
4163       }
4164 
4165       // For now we do not try to "increase" dereferenceability due to negative
4166       // indices as we first have to come up with code to deal with loops and
4167       // for overflows of the dereferenceable bytes.
4168       int64_t OffsetSExt = Offset.getSExtValue();
4169       if (OffsetSExt < 0)
4170         OffsetSExt = 0;
4171 
4172       T.takeAssumedDerefBytesMinimum(
4173           std::max(int64_t(0), DerefBytes - OffsetSExt));
4174 
4175       if (this == &AA) {
4176         if (!Stripped) {
4177           // If nothing was stripped IR information is all we got.
4178           T.takeKnownDerefBytesMaximum(
4179               std::max(int64_t(0), DerefBytes - OffsetSExt));
4180           T.indicatePessimisticFixpoint();
4181         } else if (OffsetSExt > 0) {
4182           // If something was stripped but there is circular reasoning we look
4183           // for the offset. If it is positive we basically decrease the
4184           // dereferenceable bytes in a circluar loop now, which will simply
4185           // drive them down to the known value in a very slow way which we
4186           // can accelerate.
4187           T.indicatePessimisticFixpoint();
4188         }
4189       }
4190 
4191       return T.isValidState();
4192     };
4193 
4194     DerefState T;
4195     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4196                                            VisitValueCB, getCtxI()))
4197       return indicatePessimisticFixpoint();
4198 
4199     return clampStateAndIndicateChange(getState(), T);
4200   }
4201 
4202   /// See AbstractAttribute::trackStatistics()
4203   void trackStatistics() const override {
4204     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4205   }
4206 };
4207 
4208 /// Dereferenceable attribute for a return value.
4209 struct AADereferenceableReturned final
4210     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4211   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4212       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4213             IRP, A) {}
4214 
4215   /// See AbstractAttribute::trackStatistics()
4216   void trackStatistics() const override {
4217     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4218   }
4219 };
4220 
4221 /// Dereferenceable attribute for an argument
4222 struct AADereferenceableArgument final
4223     : AAArgumentFromCallSiteArguments<AADereferenceable,
4224                                       AADereferenceableImpl> {
4225   using Base =
4226       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4227   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4228       : Base(IRP, A) {}
4229 
4230   /// See AbstractAttribute::trackStatistics()
4231   void trackStatistics() const override {
4232     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4233   }
4234 };
4235 
4236 /// Dereferenceable attribute for a call site argument.
4237 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4238   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4239       : AADereferenceableFloating(IRP, A) {}
4240 
4241   /// See AbstractAttribute::trackStatistics()
4242   void trackStatistics() const override {
4243     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4244   }
4245 };
4246 
4247 /// Dereferenceable attribute deduction for a call site return value.
4248 struct AADereferenceableCallSiteReturned final
4249     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4250   using Base =
4251       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4252   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4253       : Base(IRP, A) {}
4254 
4255   /// See AbstractAttribute::trackStatistics()
4256   void trackStatistics() const override {
4257     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4258   }
4259 };
4260 
4261 // ------------------------ Align Argument Attribute ------------------------
4262 
4263 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4264                                     Value &AssociatedValue, const Use *U,
4265                                     const Instruction *I, bool &TrackUse) {
4266   // We need to follow common pointer manipulation uses to the accesses they
4267   // feed into.
4268   if (isa<CastInst>(I)) {
4269     // Follow all but ptr2int casts.
4270     TrackUse = !isa<PtrToIntInst>(I);
4271     return 0;
4272   }
4273   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4274     if (GEP->hasAllConstantIndices())
4275       TrackUse = true;
4276     return 0;
4277   }
4278 
4279   MaybeAlign MA;
4280   if (const auto *CB = dyn_cast<CallBase>(I)) {
4281     if (CB->isBundleOperand(U) || CB->isCallee(U))
4282       return 0;
4283 
4284     unsigned ArgNo = CB->getArgOperandNo(U);
4285     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4286     // As long as we only use known information there is no need to track
4287     // dependences here.
4288     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4289     MA = MaybeAlign(AlignAA.getKnownAlign());
4290   }
4291 
4292   const DataLayout &DL = A.getDataLayout();
4293   const Value *UseV = U->get();
4294   if (auto *SI = dyn_cast<StoreInst>(I)) {
4295     if (SI->getPointerOperand() == UseV)
4296       MA = SI->getAlign();
4297   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4298     if (LI->getPointerOperand() == UseV)
4299       MA = LI->getAlign();
4300   }
4301 
4302   if (!MA || *MA <= QueryingAA.getKnownAlign())
4303     return 0;
4304 
4305   unsigned Alignment = MA->value();
4306   int64_t Offset;
4307 
4308   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4309     if (Base == &AssociatedValue) {
4310       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4311       // So we can say that the maximum power of two which is a divisor of
4312       // gcd(Offset, Alignment) is an alignment.
4313 
4314       uint32_t gcd =
4315           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4316       Alignment = llvm::PowerOf2Floor(gcd);
4317     }
4318   }
4319 
4320   return Alignment;
4321 }
4322 
4323 struct AAAlignImpl : AAAlign {
4324   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4325 
4326   /// See AbstractAttribute::initialize(...).
4327   void initialize(Attributor &A) override {
4328     SmallVector<Attribute, 4> Attrs;
4329     getAttrs({Attribute::Alignment}, Attrs);
4330     for (const Attribute &Attr : Attrs)
4331       takeKnownMaximum(Attr.getValueAsInt());
4332 
4333     Value &V = getAssociatedValue();
4334     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
4335     //       use of the function pointer. This was caused by D73131. We want to
4336     //       avoid this for function pointers especially because we iterate
4337     //       their uses and int2ptr is not handled. It is not a correctness
4338     //       problem though!
4339     if (!V.getType()->getPointerElementType()->isFunctionTy())
4340       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4341 
4342     if (getIRPosition().isFnInterfaceKind() &&
4343         (!getAnchorScope() ||
4344          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4345       indicatePessimisticFixpoint();
4346       return;
4347     }
4348 
4349     if (Instruction *CtxI = getCtxI())
4350       followUsesInMBEC(*this, A, getState(), *CtxI);
4351   }
4352 
4353   /// See AbstractAttribute::manifest(...).
4354   ChangeStatus manifest(Attributor &A) override {
4355     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4356 
4357     // Check for users that allow alignment annotations.
4358     Value &AssociatedValue = getAssociatedValue();
4359     for (const Use &U : AssociatedValue.uses()) {
4360       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4361         if (SI->getPointerOperand() == &AssociatedValue)
4362           if (SI->getAlignment() < getAssumedAlign()) {
4363             STATS_DECLTRACK(AAAlign, Store,
4364                             "Number of times alignment added to a store");
4365             SI->setAlignment(Align(getAssumedAlign()));
4366             LoadStoreChanged = ChangeStatus::CHANGED;
4367           }
4368       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4369         if (LI->getPointerOperand() == &AssociatedValue)
4370           if (LI->getAlignment() < getAssumedAlign()) {
4371             LI->setAlignment(Align(getAssumedAlign()));
4372             STATS_DECLTRACK(AAAlign, Load,
4373                             "Number of times alignment added to a load");
4374             LoadStoreChanged = ChangeStatus::CHANGED;
4375           }
4376       }
4377     }
4378 
4379     ChangeStatus Changed = AAAlign::manifest(A);
4380 
4381     Align InheritAlign =
4382         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4383     if (InheritAlign >= getAssumedAlign())
4384       return LoadStoreChanged;
4385     return Changed | LoadStoreChanged;
4386   }
4387 
4388   // TODO: Provide a helper to determine the implied ABI alignment and check in
4389   //       the existing manifest method and a new one for AAAlignImpl that value
4390   //       to avoid making the alignment explicit if it did not improve.
4391 
4392   /// See AbstractAttribute::getDeducedAttributes
4393   virtual void
4394   getDeducedAttributes(LLVMContext &Ctx,
4395                        SmallVectorImpl<Attribute> &Attrs) const override {
4396     if (getAssumedAlign() > 1)
4397       Attrs.emplace_back(
4398           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4399   }
4400 
4401   /// See followUsesInMBEC
4402   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4403                        AAAlign::StateType &State) {
4404     bool TrackUse = false;
4405 
4406     unsigned int KnownAlign =
4407         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4408     State.takeKnownMaximum(KnownAlign);
4409 
4410     return TrackUse;
4411   }
4412 
4413   /// See AbstractAttribute::getAsStr().
4414   const std::string getAsStr() const override {
4415     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4416                                 "-" + std::to_string(getAssumedAlign()) + ">")
4417                              : "unknown-align";
4418   }
4419 };
4420 
4421 /// Align attribute for a floating value.
4422 struct AAAlignFloating : AAAlignImpl {
4423   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4424 
4425   /// See AbstractAttribute::updateImpl(...).
4426   ChangeStatus updateImpl(Attributor &A) override {
4427     const DataLayout &DL = A.getDataLayout();
4428 
4429     auto VisitValueCB = [&](Value &V, const Instruction *,
4430                             AAAlign::StateType &T, bool Stripped) -> bool {
4431       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4432                                            DepClassTy::REQUIRED);
4433       if (!Stripped && this == &AA) {
4434         int64_t Offset;
4435         unsigned Alignment = 1;
4436         if (const Value *Base =
4437                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4438           Align PA = Base->getPointerAlignment(DL);
4439           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4440           // So we can say that the maximum power of two which is a divisor of
4441           // gcd(Offset, Alignment) is an alignment.
4442 
4443           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4444                                                uint32_t(PA.value()));
4445           Alignment = llvm::PowerOf2Floor(gcd);
4446         } else {
4447           Alignment = V.getPointerAlignment(DL).value();
4448         }
4449         // Use only IR information if we did not strip anything.
4450         T.takeKnownMaximum(Alignment);
4451         T.indicatePessimisticFixpoint();
4452       } else {
4453         // Use abstract attribute information.
4454         const AAAlign::StateType &DS = AA.getState();
4455         T ^= DS;
4456       }
4457       return T.isValidState();
4458     };
4459 
4460     StateType T;
4461     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4462                                           VisitValueCB, getCtxI()))
4463       return indicatePessimisticFixpoint();
4464 
4465     // TODO: If we know we visited all incoming values, thus no are assumed
4466     // dead, we can take the known information from the state T.
4467     return clampStateAndIndicateChange(getState(), T);
4468   }
4469 
4470   /// See AbstractAttribute::trackStatistics()
4471   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4472 };
4473 
4474 /// Align attribute for function return value.
4475 struct AAAlignReturned final
4476     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4477   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4478   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4479 
4480   /// See AbstractAttribute::initialize(...).
4481   void initialize(Attributor &A) override {
4482     Base::initialize(A);
4483     Function *F = getAssociatedFunction();
4484     if (!F || F->isDeclaration())
4485       indicatePessimisticFixpoint();
4486   }
4487 
4488   /// See AbstractAttribute::trackStatistics()
4489   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4490 };
4491 
4492 /// Align attribute for function argument.
4493 struct AAAlignArgument final
4494     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4495   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4496   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4497 
4498   /// See AbstractAttribute::manifest(...).
4499   ChangeStatus manifest(Attributor &A) override {
4500     // If the associated argument is involved in a must-tail call we give up
4501     // because we would need to keep the argument alignments of caller and
4502     // callee in-sync. Just does not seem worth the trouble right now.
4503     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4504       return ChangeStatus::UNCHANGED;
4505     return Base::manifest(A);
4506   }
4507 
4508   /// See AbstractAttribute::trackStatistics()
4509   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4510 };
4511 
4512 struct AAAlignCallSiteArgument final : AAAlignFloating {
4513   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4514       : AAAlignFloating(IRP, A) {}
4515 
4516   /// See AbstractAttribute::manifest(...).
4517   ChangeStatus manifest(Attributor &A) override {
4518     // If the associated argument is involved in a must-tail call we give up
4519     // because we would need to keep the argument alignments of caller and
4520     // callee in-sync. Just does not seem worth the trouble right now.
4521     if (Argument *Arg = getAssociatedArgument())
4522       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4523         return ChangeStatus::UNCHANGED;
4524     ChangeStatus Changed = AAAlignImpl::manifest(A);
4525     Align InheritAlign =
4526         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4527     if (InheritAlign >= getAssumedAlign())
4528       Changed = ChangeStatus::UNCHANGED;
4529     return Changed;
4530   }
4531 
4532   /// See AbstractAttribute::updateImpl(Attributor &A).
4533   ChangeStatus updateImpl(Attributor &A) override {
4534     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4535     if (Argument *Arg = getAssociatedArgument()) {
4536       // We only take known information from the argument
4537       // so we do not need to track a dependence.
4538       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4539           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4540       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4541     }
4542     return Changed;
4543   }
4544 
4545   /// See AbstractAttribute::trackStatistics()
4546   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4547 };
4548 
4549 /// Align attribute deduction for a call site return value.
4550 struct AAAlignCallSiteReturned final
4551     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4552   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4553   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4554       : Base(IRP, A) {}
4555 
4556   /// See AbstractAttribute::initialize(...).
4557   void initialize(Attributor &A) override {
4558     Base::initialize(A);
4559     Function *F = getAssociatedFunction();
4560     if (!F || F->isDeclaration())
4561       indicatePessimisticFixpoint();
4562   }
4563 
4564   /// See AbstractAttribute::trackStatistics()
4565   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4566 };
4567 
4568 /// ------------------ Function No-Return Attribute ----------------------------
4569 struct AANoReturnImpl : public AANoReturn {
4570   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4571 
4572   /// See AbstractAttribute::initialize(...).
4573   void initialize(Attributor &A) override {
4574     AANoReturn::initialize(A);
4575     Function *F = getAssociatedFunction();
4576     if (!F || F->isDeclaration())
4577       indicatePessimisticFixpoint();
4578   }
4579 
4580   /// See AbstractAttribute::getAsStr().
4581   const std::string getAsStr() const override {
4582     return getAssumed() ? "noreturn" : "may-return";
4583   }
4584 
4585   /// See AbstractAttribute::updateImpl(Attributor &A).
4586   virtual ChangeStatus updateImpl(Attributor &A) override {
4587     auto CheckForNoReturn = [](Instruction &) { return false; };
4588     bool UsedAssumedInformation = false;
4589     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4590                                    {(unsigned)Instruction::Ret},
4591                                    UsedAssumedInformation))
4592       return indicatePessimisticFixpoint();
4593     return ChangeStatus::UNCHANGED;
4594   }
4595 };
4596 
4597 struct AANoReturnFunction final : AANoReturnImpl {
4598   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4599       : AANoReturnImpl(IRP, A) {}
4600 
4601   /// See AbstractAttribute::trackStatistics()
4602   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4603 };
4604 
4605 /// NoReturn attribute deduction for a call sites.
4606 struct AANoReturnCallSite final : AANoReturnImpl {
4607   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4608       : AANoReturnImpl(IRP, A) {}
4609 
4610   /// See AbstractAttribute::initialize(...).
4611   void initialize(Attributor &A) override {
4612     AANoReturnImpl::initialize(A);
4613     if (Function *F = getAssociatedFunction()) {
4614       const IRPosition &FnPos = IRPosition::function(*F);
4615       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4616       if (!FnAA.isAssumedNoReturn())
4617         indicatePessimisticFixpoint();
4618     }
4619   }
4620 
4621   /// See AbstractAttribute::updateImpl(...).
4622   ChangeStatus updateImpl(Attributor &A) override {
4623     // TODO: Once we have call site specific value information we can provide
4624     //       call site specific liveness information and then it makes
4625     //       sense to specialize attributes for call sites arguments instead of
4626     //       redirecting requests to the callee argument.
4627     Function *F = getAssociatedFunction();
4628     const IRPosition &FnPos = IRPosition::function(*F);
4629     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4630     return clampStateAndIndicateChange(getState(), FnAA.getState());
4631   }
4632 
4633   /// See AbstractAttribute::trackStatistics()
4634   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4635 };
4636 
4637 /// ----------------------- Variable Capturing ---------------------------------
4638 
4639 /// A class to hold the state of for no-capture attributes.
4640 struct AANoCaptureImpl : public AANoCapture {
4641   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4642 
4643   /// See AbstractAttribute::initialize(...).
4644   void initialize(Attributor &A) override {
4645     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4646       indicateOptimisticFixpoint();
4647       return;
4648     }
4649     Function *AnchorScope = getAnchorScope();
4650     if (isFnInterfaceKind() &&
4651         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4652       indicatePessimisticFixpoint();
4653       return;
4654     }
4655 
4656     // You cannot "capture" null in the default address space.
4657     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4658         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4659       indicateOptimisticFixpoint();
4660       return;
4661     }
4662 
4663     const Function *F =
4664         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4665 
4666     // Check what state the associated function can actually capture.
4667     if (F)
4668       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4669     else
4670       indicatePessimisticFixpoint();
4671   }
4672 
4673   /// See AbstractAttribute::updateImpl(...).
4674   ChangeStatus updateImpl(Attributor &A) override;
4675 
4676   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4677   virtual void
4678   getDeducedAttributes(LLVMContext &Ctx,
4679                        SmallVectorImpl<Attribute> &Attrs) const override {
4680     if (!isAssumedNoCaptureMaybeReturned())
4681       return;
4682 
4683     if (isArgumentPosition()) {
4684       if (isAssumedNoCapture())
4685         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4686       else if (ManifestInternal)
4687         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4688     }
4689   }
4690 
4691   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4692   /// depending on the ability of the function associated with \p IRP to capture
4693   /// state in memory and through "returning/throwing", respectively.
4694   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4695                                                    const Function &F,
4696                                                    BitIntegerState &State) {
4697     // TODO: Once we have memory behavior attributes we should use them here.
4698 
4699     // If we know we cannot communicate or write to memory, we do not care about
4700     // ptr2int anymore.
4701     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4702         F.getReturnType()->isVoidTy()) {
4703       State.addKnownBits(NO_CAPTURE);
4704       return;
4705     }
4706 
4707     // A function cannot capture state in memory if it only reads memory, it can
4708     // however return/throw state and the state might be influenced by the
4709     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4710     if (F.onlyReadsMemory())
4711       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4712 
4713     // A function cannot communicate state back if it does not through
4714     // exceptions and doesn not return values.
4715     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4716       State.addKnownBits(NOT_CAPTURED_IN_RET);
4717 
4718     // Check existing "returned" attributes.
4719     int ArgNo = IRP.getCalleeArgNo();
4720     if (F.doesNotThrow() && ArgNo >= 0) {
4721       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4722         if (F.hasParamAttribute(u, Attribute::Returned)) {
4723           if (u == unsigned(ArgNo))
4724             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4725           else if (F.onlyReadsMemory())
4726             State.addKnownBits(NO_CAPTURE);
4727           else
4728             State.addKnownBits(NOT_CAPTURED_IN_RET);
4729           break;
4730         }
4731     }
4732   }
4733 
4734   /// See AbstractState::getAsStr().
4735   const std::string getAsStr() const override {
4736     if (isKnownNoCapture())
4737       return "known not-captured";
4738     if (isAssumedNoCapture())
4739       return "assumed not-captured";
4740     if (isKnownNoCaptureMaybeReturned())
4741       return "known not-captured-maybe-returned";
4742     if (isAssumedNoCaptureMaybeReturned())
4743       return "assumed not-captured-maybe-returned";
4744     return "assumed-captured";
4745   }
4746 };
4747 
4748 /// Attributor-aware capture tracker.
4749 struct AACaptureUseTracker final : public CaptureTracker {
4750 
4751   /// Create a capture tracker that can lookup in-flight abstract attributes
4752   /// through the Attributor \p A.
4753   ///
4754   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4755   /// search is stopped. If a use leads to a return instruction,
4756   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4757   /// If a use leads to a ptr2int which may capture the value,
4758   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4759   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4760   /// set. All values in \p PotentialCopies are later tracked as well. For every
4761   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4762   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4763   /// conservatively set to true.
4764   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4765                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4766                       SmallSetVector<Value *, 4> &PotentialCopies,
4767                       unsigned &RemainingUsesToExplore)
4768       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4769         PotentialCopies(PotentialCopies),
4770         RemainingUsesToExplore(RemainingUsesToExplore) {}
4771 
4772   /// Determine if \p V maybe captured. *Also updates the state!*
4773   bool valueMayBeCaptured(const Value *V) {
4774     if (V->getType()->isPointerTy()) {
4775       PointerMayBeCaptured(V, this);
4776     } else {
4777       State.indicatePessimisticFixpoint();
4778     }
4779     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4780   }
4781 
4782   /// See CaptureTracker::tooManyUses().
4783   void tooManyUses() override {
4784     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4785   }
4786 
4787   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4788     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4789       return true;
4790     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4791         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4792     return DerefAA.getAssumedDereferenceableBytes();
4793   }
4794 
4795   /// See CaptureTracker::captured(...).
4796   bool captured(const Use *U) override {
4797     Instruction *UInst = cast<Instruction>(U->getUser());
4798     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4799                       << "\n");
4800 
4801     // Because we may reuse the tracker multiple times we keep track of the
4802     // number of explored uses ourselves as well.
4803     if (RemainingUsesToExplore-- == 0) {
4804       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4805       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4806                           /* Return */ true);
4807     }
4808 
4809     // Deal with ptr2int by following uses.
4810     if (isa<PtrToIntInst>(UInst)) {
4811       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4812       return valueMayBeCaptured(UInst);
4813     }
4814 
4815     // For stores we check if we can follow the value through memory or not.
4816     if (auto *SI = dyn_cast<StoreInst>(UInst)) {
4817       if (SI->isVolatile())
4818         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4819                             /* Return */ false);
4820       bool UsedAssumedInformation = false;
4821       if (!AA::getPotentialCopiesOfStoredValue(
4822               A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
4823         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4824                             /* Return */ false);
4825       // Not captured directly, potential copies will be checked.
4826       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4827                           /* Return */ false);
4828     }
4829 
4830     // Explicitly catch return instructions.
4831     if (isa<ReturnInst>(UInst)) {
4832       if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
4833         return isCapturedIn(/* Memory */ false, /* Integer */ false,
4834                             /* Return */ true);
4835       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4836                           /* Return */ true);
4837     }
4838 
4839     // For now we only use special logic for call sites. However, the tracker
4840     // itself knows about a lot of other non-capturing cases already.
4841     auto *CB = dyn_cast<CallBase>(UInst);
4842     if (!CB || !CB->isArgOperand(U))
4843       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4844                           /* Return */ true);
4845 
4846     unsigned ArgNo = CB->getArgOperandNo(U);
4847     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4848     // If we have a abstract no-capture attribute for the argument we can use
4849     // it to justify a non-capture attribute here. This allows recursion!
4850     auto &ArgNoCaptureAA =
4851         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4852     if (ArgNoCaptureAA.isAssumedNoCapture())
4853       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4854                           /* Return */ false);
4855     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4856       addPotentialCopy(*CB);
4857       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4858                           /* Return */ false);
4859     }
4860 
4861     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4862     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4863                         /* Return */ true);
4864   }
4865 
4866   /// Register \p CS as potential copy of the value we are checking.
4867   void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }
4868 
4869   /// See CaptureTracker::shouldExplore(...).
4870   bool shouldExplore(const Use *U) override {
4871     // Check liveness and ignore droppable users.
4872     bool UsedAssumedInformation = false;
4873     return !U->getUser()->isDroppable() &&
4874            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
4875                             UsedAssumedInformation);
4876   }
4877 
4878   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4879   /// \p CapturedInRet, then return the appropriate value for use in the
4880   /// CaptureTracker::captured() interface.
4881   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4882                     bool CapturedInRet) {
4883     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4884                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4885     if (CapturedInMem)
4886       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4887     if (CapturedInInt)
4888       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4889     if (CapturedInRet)
4890       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4891     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4892   }
4893 
4894 private:
4895   /// The attributor providing in-flight abstract attributes.
4896   Attributor &A;
4897 
4898   /// The abstract attribute currently updated.
4899   AANoCapture &NoCaptureAA;
4900 
4901   /// The abstract liveness state.
4902   const AAIsDead &IsDeadAA;
4903 
4904   /// The state currently updated.
4905   AANoCapture::StateType &State;
4906 
4907   /// Set of potential copies of the tracked value.
4908   SmallSetVector<Value *, 4> &PotentialCopies;
4909 
4910   /// Global counter to limit the number of explored uses.
4911   unsigned &RemainingUsesToExplore;
4912 };
4913 
4914 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4915   const IRPosition &IRP = getIRPosition();
4916   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4917                                   : &IRP.getAssociatedValue();
4918   if (!V)
4919     return indicatePessimisticFixpoint();
4920 
4921   const Function *F =
4922       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4923   assert(F && "Expected a function!");
4924   const IRPosition &FnPos = IRPosition::function(*F);
4925   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4926 
4927   AANoCapture::StateType T;
4928 
4929   // Readonly means we cannot capture through memory.
4930   const auto &FnMemAA =
4931       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4932   if (FnMemAA.isAssumedReadOnly()) {
4933     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4934     if (FnMemAA.isKnownReadOnly())
4935       addKnownBits(NOT_CAPTURED_IN_MEM);
4936     else
4937       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4938   }
4939 
4940   // Make sure all returned values are different than the underlying value.
4941   // TODO: we could do this in a more sophisticated way inside
4942   //       AAReturnedValues, e.g., track all values that escape through returns
4943   //       directly somehow.
4944   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4945     bool SeenConstant = false;
4946     for (auto &It : RVAA.returned_values()) {
4947       if (isa<Constant>(It.first)) {
4948         if (SeenConstant)
4949           return false;
4950         SeenConstant = true;
4951       } else if (!isa<Argument>(It.first) ||
4952                  It.first == getAssociatedArgument())
4953         return false;
4954     }
4955     return true;
4956   };
4957 
4958   const auto &NoUnwindAA =
4959       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4960   if (NoUnwindAA.isAssumedNoUnwind()) {
4961     bool IsVoidTy = F->getReturnType()->isVoidTy();
4962     const AAReturnedValues *RVAA =
4963         IsVoidTy ? nullptr
4964                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4965 
4966                                                  DepClassTy::OPTIONAL);
4967     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4968       T.addKnownBits(NOT_CAPTURED_IN_RET);
4969       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4970         return ChangeStatus::UNCHANGED;
4971       if (NoUnwindAA.isKnownNoUnwind() &&
4972           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4973         addKnownBits(NOT_CAPTURED_IN_RET);
4974         if (isKnown(NOT_CAPTURED_IN_MEM))
4975           return indicateOptimisticFixpoint();
4976       }
4977     }
4978   }
4979 
4980   // Use the CaptureTracker interface and logic with the specialized tracker,
4981   // defined in AACaptureUseTracker, that can look at in-flight abstract
4982   // attributes and directly updates the assumed state.
4983   SmallSetVector<Value *, 4> PotentialCopies;
4984   unsigned RemainingUsesToExplore =
4985       getDefaultMaxUsesToExploreForCaptureTracking();
4986   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4987                               RemainingUsesToExplore);
4988 
4989   // Check all potential copies of the associated value until we can assume
4990   // none will be captured or we have to assume at least one might be.
4991   unsigned Idx = 0;
4992   PotentialCopies.insert(V);
4993   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4994     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4995 
4996   AANoCapture::StateType &S = getState();
4997   auto Assumed = S.getAssumed();
4998   S.intersectAssumedBits(T.getAssumed());
4999   if (!isAssumedNoCaptureMaybeReturned())
5000     return indicatePessimisticFixpoint();
5001   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
5002                                    : ChangeStatus::CHANGED;
5003 }
5004 
5005 /// NoCapture attribute for function arguments.
5006 struct AANoCaptureArgument final : AANoCaptureImpl {
5007   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
5008       : AANoCaptureImpl(IRP, A) {}
5009 
5010   /// See AbstractAttribute::trackStatistics()
5011   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
5012 };
5013 
5014 /// NoCapture attribute for call site arguments.
5015 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
5016   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5017       : AANoCaptureImpl(IRP, A) {}
5018 
5019   /// See AbstractAttribute::initialize(...).
5020   void initialize(Attributor &A) override {
5021     if (Argument *Arg = getAssociatedArgument())
5022       if (Arg->hasByValAttr())
5023         indicateOptimisticFixpoint();
5024     AANoCaptureImpl::initialize(A);
5025   }
5026 
5027   /// See AbstractAttribute::updateImpl(...).
5028   ChangeStatus updateImpl(Attributor &A) override {
5029     // TODO: Once we have call site specific value information we can provide
5030     //       call site specific liveness information and then it makes
5031     //       sense to specialize attributes for call sites arguments instead of
5032     //       redirecting requests to the callee argument.
5033     Argument *Arg = getAssociatedArgument();
5034     if (!Arg)
5035       return indicatePessimisticFixpoint();
5036     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5037     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5038     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5039   }
5040 
5041   /// See AbstractAttribute::trackStatistics()
5042   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5043 };
5044 
5045 /// NoCapture attribute for floating values.
5046 struct AANoCaptureFloating final : AANoCaptureImpl {
5047   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5048       : AANoCaptureImpl(IRP, A) {}
5049 
5050   /// See AbstractAttribute::trackStatistics()
5051   void trackStatistics() const override {
5052     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5053   }
5054 };
5055 
5056 /// NoCapture attribute for function return value.
5057 struct AANoCaptureReturned final : AANoCaptureImpl {
5058   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5059       : AANoCaptureImpl(IRP, A) {
5060     llvm_unreachable("NoCapture is not applicable to function returns!");
5061   }
5062 
5063   /// See AbstractAttribute::initialize(...).
5064   void initialize(Attributor &A) override {
5065     llvm_unreachable("NoCapture is not applicable to function returns!");
5066   }
5067 
5068   /// See AbstractAttribute::updateImpl(...).
5069   ChangeStatus updateImpl(Attributor &A) override {
5070     llvm_unreachable("NoCapture is not applicable to function returns!");
5071   }
5072 
5073   /// See AbstractAttribute::trackStatistics()
5074   void trackStatistics() const override {}
5075 };
5076 
5077 /// NoCapture attribute deduction for a call site return value.
5078 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5079   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5080       : AANoCaptureImpl(IRP, A) {}
5081 
5082   /// See AbstractAttribute::initialize(...).
5083   void initialize(Attributor &A) override {
5084     const Function *F = getAnchorScope();
5085     // Check what state the associated function can actually capture.
5086     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5087   }
5088 
5089   /// See AbstractAttribute::trackStatistics()
5090   void trackStatistics() const override {
5091     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5092   }
5093 };
5094 
5095 /// ------------------ Value Simplify Attribute ----------------------------
5096 
5097 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5098   // FIXME: Add a typecast support.
5099   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5100       SimplifiedAssociatedValue, Other, Ty);
5101   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5102     return false;
5103 
5104   LLVM_DEBUG({
5105     if (SimplifiedAssociatedValue.hasValue())
5106       dbgs() << "[ValueSimplify] is assumed to be "
5107              << **SimplifiedAssociatedValue << "\n";
5108     else
5109       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5110   });
5111   return true;
5112 }
5113 
5114 struct AAValueSimplifyImpl : AAValueSimplify {
5115   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5116       : AAValueSimplify(IRP, A) {}
5117 
5118   /// See AbstractAttribute::initialize(...).
5119   void initialize(Attributor &A) override {
5120     if (getAssociatedValue().getType()->isVoidTy())
5121       indicatePessimisticFixpoint();
5122     if (A.hasSimplificationCallback(getIRPosition()))
5123       indicatePessimisticFixpoint();
5124   }
5125 
5126   /// See AbstractAttribute::getAsStr().
5127   const std::string getAsStr() const override {
5128     LLVM_DEBUG({
5129       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
5130       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5131         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5132     });
5133     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5134                           : "not-simple";
5135   }
5136 
5137   /// See AbstractAttribute::trackStatistics()
5138   void trackStatistics() const override {}
5139 
5140   /// See AAValueSimplify::getAssumedSimplifiedValue()
5141   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5142     return SimplifiedAssociatedValue;
5143   }
5144 
5145   /// Return a value we can use as replacement for the associated one, or
5146   /// nullptr if we don't have one that makes sense.
5147   Value *getReplacementValue(Attributor &A) const {
5148     Value *NewV;
5149     NewV = SimplifiedAssociatedValue.hasValue()
5150                ? SimplifiedAssociatedValue.getValue()
5151                : UndefValue::get(getAssociatedType());
5152     if (!NewV)
5153       return nullptr;
5154     NewV = AA::getWithType(*NewV, *getAssociatedType());
5155     if (!NewV || NewV == &getAssociatedValue())
5156       return nullptr;
5157     const Instruction *CtxI = getCtxI();
5158     if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
5159       return nullptr;
5160     if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
5161       return nullptr;
5162     return NewV;
5163   }
5164 
5165   /// Helper function for querying AAValueSimplify and updating candicate.
5166   /// \param IRP The value position we are trying to unify with SimplifiedValue
5167   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5168                       const IRPosition &IRP, bool Simplify = true) {
5169     bool UsedAssumedInformation = false;
5170     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5171     if (Simplify)
5172       QueryingValueSimplified =
5173           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5174     return unionAssumed(QueryingValueSimplified);
5175   }
5176 
5177   /// Returns a candidate is found or not
5178   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5179     if (!getAssociatedValue().getType()->isIntegerTy())
5180       return false;
5181 
5182     // This will also pass the call base context.
5183     const auto &AA =
5184         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5185 
5186     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5187 
5188     if (!COpt.hasValue()) {
5189       SimplifiedAssociatedValue = llvm::None;
5190       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5191       return true;
5192     }
5193     if (auto *C = COpt.getValue()) {
5194       SimplifiedAssociatedValue = C;
5195       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5196       return true;
5197     }
5198     return false;
5199   }
5200 
5201   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5202     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5203       return true;
5204     if (askSimplifiedValueFor<AAPotentialValues>(A))
5205       return true;
5206     return false;
5207   }
5208 
5209   /// See AbstractAttribute::manifest(...).
5210   ChangeStatus manifest(Attributor &A) override {
5211     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5212     if (getAssociatedValue().user_empty())
5213       return Changed;
5214 
5215     if (auto *NewV = getReplacementValue(A)) {
5216       LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
5217                         << *NewV << " :: " << *this << "\n");
5218       if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
5219         Changed = ChangeStatus::CHANGED;
5220     }
5221 
5222     return Changed | AAValueSimplify::manifest(A);
5223   }
5224 
5225   /// See AbstractState::indicatePessimisticFixpoint(...).
5226   ChangeStatus indicatePessimisticFixpoint() override {
5227     SimplifiedAssociatedValue = &getAssociatedValue();
5228     return AAValueSimplify::indicatePessimisticFixpoint();
5229   }
5230 
5231   static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5232                          LoadInst &L, function_ref<bool(Value &)> Union) {
5233     auto UnionWrapper = [&](Value &V, Value &Obj) {
5234       if (isa<AllocaInst>(Obj))
5235         return Union(V);
5236       if (!AA::isDynamicallyUnique(A, AA, V))
5237         return false;
5238       if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5239         return false;
5240       return Union(V);
5241     };
5242 
5243     Value &Ptr = *L.getPointerOperand();
5244     SmallVector<Value *, 8> Objects;
5245     if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
5246       return false;
5247 
5248     for (Value *Obj : Objects) {
5249       LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5250       if (isa<UndefValue>(Obj))
5251         continue;
5252       if (isa<ConstantPointerNull>(Obj)) {
5253         // A null pointer access can be undefined but any offset from null may
5254         // be OK. We do not try to optimize the latter.
5255         bool UsedAssumedInformation = false;
5256         if (!NullPointerIsDefined(L.getFunction(),
5257                                   Ptr.getType()->getPointerAddressSpace()) &&
5258             A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5259           continue;
5260         return false;
5261       }
5262       if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj))
5263         return false;
5264       Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType());
5265       if (!InitialVal || !Union(*InitialVal))
5266         return false;
5267 
5268       LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5269                            "propagation, checking accesses next.\n");
5270 
5271       auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5272         LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5273         if (!Acc.isWrite())
5274           return true;
5275         if (Acc.isWrittenValueYetUndetermined())
5276           return true;
5277         Value *Content = Acc.getWrittenValue();
5278         if (!Content)
5279           return false;
5280         Value *CastedContent =
5281             AA::getWithType(*Content, *AA.getAssociatedType());
5282         if (!CastedContent)
5283           return false;
5284         if (IsExact)
5285           return UnionWrapper(*CastedContent, *Obj);
5286         if (auto *C = dyn_cast<Constant>(CastedContent))
5287           if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5288             return UnionWrapper(*CastedContent, *Obj);
5289         return false;
5290       };
5291 
5292       auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5293                                            DepClassTy::REQUIRED);
5294       if (!PI.forallInterferingAccesses(L, CheckAccess))
5295         return false;
5296     }
5297     return true;
5298   }
5299 };
5300 
5301 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5302   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5303       : AAValueSimplifyImpl(IRP, A) {}
5304 
5305   void initialize(Attributor &A) override {
5306     AAValueSimplifyImpl::initialize(A);
5307     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5308       indicatePessimisticFixpoint();
5309     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5310                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5311                 /* IgnoreSubsumingPositions */ true))
5312       indicatePessimisticFixpoint();
5313 
5314     // FIXME: This is a hack to prevent us from propagating function poiner in
5315     // the new pass manager CGSCC pass as it creates call edges the
5316     // CallGraphUpdater cannot handle yet.
5317     Value &V = getAssociatedValue();
5318     if (V.getType()->isPointerTy() &&
5319         V.getType()->getPointerElementType()->isFunctionTy() &&
5320         !A.isModulePass())
5321       indicatePessimisticFixpoint();
5322   }
5323 
5324   /// See AbstractAttribute::updateImpl(...).
5325   ChangeStatus updateImpl(Attributor &A) override {
5326     // Byval is only replacable if it is readonly otherwise we would write into
5327     // the replaced value and not the copy that byval creates implicitly.
5328     Argument *Arg = getAssociatedArgument();
5329     if (Arg->hasByValAttr()) {
5330       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5331       //       there is no race by not copying a constant byval.
5332       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
5333                                                        DepClassTy::REQUIRED);
5334       if (!MemAA.isAssumedReadOnly())
5335         return indicatePessimisticFixpoint();
5336     }
5337 
5338     auto Before = SimplifiedAssociatedValue;
5339 
5340     auto PredForCallSite = [&](AbstractCallSite ACS) {
5341       const IRPosition &ACSArgPos =
5342           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5343       // Check if a coresponding argument was found or if it is on not
5344       // associated (which can happen for callback calls).
5345       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5346         return false;
5347 
5348       // Simplify the argument operand explicitly and check if the result is
5349       // valid in the current scope. This avoids refering to simplified values
5350       // in other functions, e.g., we don't want to say a an argument in a
5351       // static function is actually an argument in a different function.
5352       bool UsedAssumedInformation = false;
5353       Optional<Constant *> SimpleArgOp =
5354           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5355       if (!SimpleArgOp.hasValue())
5356         return true;
5357       if (!SimpleArgOp.getValue())
5358         return false;
5359       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5360         return false;
5361       return unionAssumed(*SimpleArgOp);
5362     };
5363 
5364     // Generate a answer specific to a call site context.
5365     bool Success;
5366     bool AllCallSitesKnown;
5367     if (hasCallBaseContext() &&
5368         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5369       Success = PredForCallSite(
5370           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5371     else
5372       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5373                                        AllCallSitesKnown);
5374 
5375     if (!Success)
5376       if (!askSimplifiedValueForOtherAAs(A))
5377         return indicatePessimisticFixpoint();
5378 
5379     // If a candicate was found in this update, return CHANGED.
5380     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5381                                                : ChangeStatus ::CHANGED;
5382   }
5383 
5384   /// See AbstractAttribute::trackStatistics()
5385   void trackStatistics() const override {
5386     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5387   }
5388 };
5389 
5390 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5391   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5392       : AAValueSimplifyImpl(IRP, A) {}
5393 
5394   /// See AAValueSimplify::getAssumedSimplifiedValue()
5395   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5396     if (!isValidState())
5397       return nullptr;
5398     return SimplifiedAssociatedValue;
5399   }
5400 
5401   /// See AbstractAttribute::updateImpl(...).
5402   ChangeStatus updateImpl(Attributor &A) override {
5403     auto Before = SimplifiedAssociatedValue;
5404 
5405     auto PredForReturned = [&](Value &V) {
5406       return checkAndUpdate(A, *this,
5407                             IRPosition::value(V, getCallBaseContext()));
5408     };
5409 
5410     if (!A.checkForAllReturnedValues(PredForReturned, *this))
5411       if (!askSimplifiedValueForOtherAAs(A))
5412         return indicatePessimisticFixpoint();
5413 
5414     // If a candicate was found in this update, return CHANGED.
5415     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5416                                                : ChangeStatus ::CHANGED;
5417   }
5418 
5419   ChangeStatus manifest(Attributor &A) override {
5420     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5421 
5422     if (auto *NewV = getReplacementValue(A)) {
5423       auto PredForReturned =
5424           [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5425             for (ReturnInst *RI : RetInsts) {
5426               Value *ReturnedVal = RI->getReturnValue();
5427               if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
5428                 return true;
5429               assert(RI->getFunction() == getAnchorScope() &&
5430                      "ReturnInst in wrong function!");
5431               LLVM_DEBUG(dbgs()
5432                          << "[ValueSimplify] " << *ReturnedVal << " -> "
5433                          << *NewV << " in " << *RI << " :: " << *this << "\n");
5434               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
5435                 Changed = ChangeStatus::CHANGED;
5436             }
5437             return true;
5438           };
5439       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
5440     }
5441 
5442     return Changed | AAValueSimplify::manifest(A);
5443   }
5444 
5445   /// See AbstractAttribute::trackStatistics()
5446   void trackStatistics() const override {
5447     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5448   }
5449 };
5450 
5451 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5452   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5453       : AAValueSimplifyImpl(IRP, A) {}
5454 
5455   /// See AbstractAttribute::initialize(...).
5456   void initialize(Attributor &A) override {
5457     AAValueSimplifyImpl::initialize(A);
5458     Value &V = getAnchorValue();
5459 
5460     // TODO: add other stuffs
5461     if (isa<Constant>(V))
5462       indicatePessimisticFixpoint();
5463   }
5464 
5465   /// Check if \p Cmp is a comparison we can simplify.
5466   ///
5467   /// We handle multiple cases, one in which at least one operand is an
5468   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5469   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5470   /// will be updated.
5471   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5472     auto Union = [&](Value &V) {
5473       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5474           SimplifiedAssociatedValue, &V, V.getType());
5475       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5476     };
5477 
5478     Value *LHS = Cmp.getOperand(0);
5479     Value *RHS = Cmp.getOperand(1);
5480 
5481     // Simplify the operands first.
5482     bool UsedAssumedInformation = false;
5483     const auto &SimplifiedLHS =
5484         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5485                                *this, UsedAssumedInformation);
5486     if (!SimplifiedLHS.hasValue())
5487       return true;
5488     if (!SimplifiedLHS.getValue())
5489       return false;
5490     LHS = *SimplifiedLHS;
5491 
5492     const auto &SimplifiedRHS =
5493         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5494                                *this, UsedAssumedInformation);
5495     if (!SimplifiedRHS.hasValue())
5496       return true;
5497     if (!SimplifiedRHS.getValue())
5498       return false;
5499     RHS = *SimplifiedRHS;
5500 
5501     LLVMContext &Ctx = Cmp.getContext();
5502     // Handle the trivial case first in which we don't even need to think about
5503     // null or non-null.
5504     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5505       Constant *NewVal =
5506           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5507       if (!Union(*NewVal))
5508         return false;
5509       if (!UsedAssumedInformation)
5510         indicateOptimisticFixpoint();
5511       return true;
5512     }
5513 
5514     // From now on we only handle equalities (==, !=).
5515     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5516     if (!ICmp || !ICmp->isEquality())
5517       return false;
5518 
5519     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5520     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5521     if (!LHSIsNull && !RHSIsNull)
5522       return false;
5523 
5524     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5525     // non-nullptr operand and if we assume it's non-null we can conclude the
5526     // result of the comparison.
5527     assert((LHSIsNull || RHSIsNull) &&
5528            "Expected nullptr versus non-nullptr comparison at this point");
5529 
5530     // The index is the operand that we assume is not null.
5531     unsigned PtrIdx = LHSIsNull;
5532     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5533         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5534         DepClassTy::REQUIRED);
5535     if (!PtrNonNullAA.isAssumedNonNull())
5536       return false;
5537     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5538 
5539     // The new value depends on the predicate, true for != and false for ==.
5540     Constant *NewVal = ConstantInt::get(
5541         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5542     if (!Union(*NewVal))
5543       return false;
5544 
5545     if (!UsedAssumedInformation)
5546       indicateOptimisticFixpoint();
5547 
5548     return true;
5549   }
5550 
5551   bool updateWithLoad(Attributor &A, LoadInst &L) {
5552     auto Union = [&](Value &V) {
5553       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5554           SimplifiedAssociatedValue, &V, L.getType());
5555       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5556     };
5557     return handleLoad(A, *this, L, Union);
5558   }
5559 
5560   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
5561   /// simplify any operand of the instruction \p I. Return true if successful,
5562   /// in that case SimplifiedAssociatedValue will be updated.
5563   bool handleGenericInst(Attributor &A, Instruction &I) {
5564     bool SomeSimplified = false;
5565     bool UsedAssumedInformation = false;
5566 
5567     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5568     int Idx = 0;
5569     for (Value *Op : I.operands()) {
5570       const auto &SimplifiedOp =
5571           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5572                                  *this, UsedAssumedInformation);
5573       // If we are not sure about any operand we are not sure about the entire
5574       // instruction, we'll wait.
5575       if (!SimplifiedOp.hasValue())
5576         return true;
5577 
5578       if (SimplifiedOp.getValue())
5579         NewOps[Idx] = SimplifiedOp.getValue();
5580       else
5581         NewOps[Idx] = Op;
5582 
5583       SomeSimplified |= (NewOps[Idx] != Op);
5584       ++Idx;
5585     }
5586 
5587     // We won't bother with the InstSimplify interface if we didn't simplify any
5588     // operand ourselves.
5589     if (!SomeSimplified)
5590       return false;
5591 
5592     InformationCache &InfoCache = A.getInfoCache();
5593     Function *F = I.getFunction();
5594     const auto *DT =
5595         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5596     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5597     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5598     OptimizationRemarkEmitter *ORE = nullptr;
5599 
5600     const DataLayout &DL = I.getModule()->getDataLayout();
5601     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5602     if (Value *SimplifiedI =
5603             SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5604       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5605           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5606       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5607     }
5608     return false;
5609   }
5610 
5611   /// See AbstractAttribute::updateImpl(...).
5612   ChangeStatus updateImpl(Attributor &A) override {
5613     auto Before = SimplifiedAssociatedValue;
5614 
5615     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5616                             bool Stripped) -> bool {
5617       auto &AA = A.getAAFor<AAValueSimplify>(
5618           *this, IRPosition::value(V, getCallBaseContext()),
5619           DepClassTy::REQUIRED);
5620       if (!Stripped && this == &AA) {
5621 
5622         if (auto *I = dyn_cast<Instruction>(&V)) {
5623           if (auto *LI = dyn_cast<LoadInst>(&V))
5624             if (updateWithLoad(A, *LI))
5625               return true;
5626           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5627             if (handleCmp(A, *Cmp))
5628               return true;
5629           if (handleGenericInst(A, *I))
5630             return true;
5631         }
5632         // TODO: Look the instruction and check recursively.
5633 
5634         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5635                           << "\n");
5636         return false;
5637       }
5638       return checkAndUpdate(A, *this,
5639                             IRPosition::value(V, getCallBaseContext()));
5640     };
5641 
5642     bool Dummy = false;
5643     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5644                                      VisitValueCB, getCtxI(),
5645                                      /* UseValueSimplify */ false))
5646       if (!askSimplifiedValueForOtherAAs(A))
5647         return indicatePessimisticFixpoint();
5648 
5649     // If a candicate was found in this update, return CHANGED.
5650     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5651                                                : ChangeStatus ::CHANGED;
5652   }
5653 
5654   /// See AbstractAttribute::trackStatistics()
5655   void trackStatistics() const override {
5656     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5657   }
5658 };
5659 
5660 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5661   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5662       : AAValueSimplifyImpl(IRP, A) {}
5663 
5664   /// See AbstractAttribute::initialize(...).
5665   void initialize(Attributor &A) override {
5666     SimplifiedAssociatedValue = nullptr;
5667     indicateOptimisticFixpoint();
5668   }
5669   /// See AbstractAttribute::initialize(...).
5670   ChangeStatus updateImpl(Attributor &A) override {
5671     llvm_unreachable(
5672         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5673   }
5674   /// See AbstractAttribute::trackStatistics()
5675   void trackStatistics() const override {
5676     STATS_DECLTRACK_FN_ATTR(value_simplify)
5677   }
5678 };
5679 
5680 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5681   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5682       : AAValueSimplifyFunction(IRP, A) {}
5683   /// See AbstractAttribute::trackStatistics()
5684   void trackStatistics() const override {
5685     STATS_DECLTRACK_CS_ATTR(value_simplify)
5686   }
5687 };
5688 
5689 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5690   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5691       : AAValueSimplifyImpl(IRP, A) {}
5692 
5693   void initialize(Attributor &A) override {
5694     AAValueSimplifyImpl::initialize(A);
5695     if (!getAssociatedFunction())
5696       indicatePessimisticFixpoint();
5697   }
5698 
5699   /// See AbstractAttribute::updateImpl(...).
5700   ChangeStatus updateImpl(Attributor &A) override {
5701     auto Before = SimplifiedAssociatedValue;
5702     auto &RetAA = A.getAAFor<AAReturnedValues>(
5703         *this, IRPosition::function(*getAssociatedFunction()),
5704         DepClassTy::REQUIRED);
5705     auto PredForReturned =
5706         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5707           bool UsedAssumedInformation = false;
5708           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5709               &RetVal, *cast<CallBase>(getCtxI()), *this,
5710               UsedAssumedInformation);
5711           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5712               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5713           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5714         };
5715     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5716       if (!askSimplifiedValueForOtherAAs(A))
5717         return indicatePessimisticFixpoint();
5718     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5719                                                : ChangeStatus ::CHANGED;
5720   }
5721 
5722   void trackStatistics() const override {
5723     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5724   }
5725 };
5726 
5727 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5728   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5729       : AAValueSimplifyFloating(IRP, A) {}
5730 
5731   /// See AbstractAttribute::manifest(...).
5732   ChangeStatus manifest(Attributor &A) override {
5733     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5734 
5735     if (auto *NewV = getReplacementValue(A)) {
5736       Use &U = cast<CallBase>(&getAnchorValue())
5737                    ->getArgOperandUse(getCallSiteArgNo());
5738       if (A.changeUseAfterManifest(U, *NewV))
5739         Changed = ChangeStatus::CHANGED;
5740     }
5741 
5742     return Changed | AAValueSimplify::manifest(A);
5743   }
5744 
5745   void trackStatistics() const override {
5746     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5747   }
5748 };
5749 
5750 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5751 struct AAHeapToStackFunction final : public AAHeapToStack {
5752 
5753   struct AllocationInfo {
5754     /// The call that allocates the memory.
5755     CallBase *const CB;
5756 
5757     /// The kind of allocation.
5758     const enum class AllocationKind {
5759       MALLOC,
5760       CALLOC,
5761       ALIGNED_ALLOC,
5762     } Kind;
5763 
5764     /// The library function id for the allocation.
5765     LibFunc LibraryFunctionId = NotLibFunc;
5766 
5767     /// The status wrt. a rewrite.
5768     enum {
5769       STACK_DUE_TO_USE,
5770       STACK_DUE_TO_FREE,
5771       INVALID,
5772     } Status = STACK_DUE_TO_USE;
5773 
5774     /// Flag to indicate if we encountered a use that might free this allocation
5775     /// but which is not in the deallocation infos.
5776     bool HasPotentiallyFreeingUnknownUses = false;
5777 
5778     /// The set of free calls that use this allocation.
5779     SmallPtrSet<CallBase *, 1> PotentialFreeCalls{};
5780   };
5781 
5782   struct DeallocationInfo {
5783     /// The call that deallocates the memory.
5784     CallBase *const CB;
5785 
5786     /// Flag to indicate if we don't know all objects this deallocation might
5787     /// free.
5788     bool MightFreeUnknownObjects = false;
5789 
5790     /// The set of allocation calls that are potentially freed.
5791     SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{};
5792   };
5793 
5794   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5795       : AAHeapToStack(IRP, A) {}
5796 
5797   ~AAHeapToStackFunction() {
5798     // Ensure we call the destructor so we release any memory allocated in the
5799     // sets.
5800     for (auto &It : AllocationInfos)
5801       It.getSecond()->~AllocationInfo();
5802     for (auto &It : DeallocationInfos)
5803       It.getSecond()->~DeallocationInfo();
5804   }
5805 
5806   void initialize(Attributor &A) override {
5807     AAHeapToStack::initialize(A);
5808 
5809     const Function *F = getAnchorScope();
5810     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5811 
5812     auto AllocationIdentifierCB = [&](Instruction &I) {
5813       CallBase *CB = dyn_cast<CallBase>(&I);
5814       if (!CB)
5815         return true;
5816       if (isFreeCall(CB, TLI)) {
5817         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
5818         return true;
5819       }
5820       bool IsMalloc = isMallocLikeFn(CB, TLI);
5821       bool IsAlignedAllocLike = !IsMalloc && isAlignedAllocLikeFn(CB, TLI);
5822       bool IsCalloc =
5823           !IsMalloc && !IsAlignedAllocLike && isCallocLikeFn(CB, TLI);
5824       if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc)
5825         return true;
5826       auto Kind =
5827           IsMalloc ? AllocationInfo::AllocationKind::MALLOC
5828                    : (IsCalloc ? AllocationInfo::AllocationKind::CALLOC
5829                                : AllocationInfo::AllocationKind::ALIGNED_ALLOC);
5830 
5831       AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB, Kind};
5832       AllocationInfos[CB] = AI;
5833       TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5834       return true;
5835     };
5836 
5837     bool UsedAssumedInformation = false;
5838     bool Success = A.checkForAllCallLikeInstructions(
5839         AllocationIdentifierCB, *this, UsedAssumedInformation,
5840         /* CheckBBLivenessOnly */ false,
5841         /* CheckPotentiallyDead */ true);
5842     (void)Success;
5843     assert(Success && "Did not expect the call base visit callback to fail!");
5844   }
5845 
5846   const std::string getAsStr() const override {
5847     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5848     for (const auto &It : AllocationInfos) {
5849       if (It.second->Status == AllocationInfo::INVALID)
5850         ++NumInvalidMallocs;
5851       else
5852         ++NumH2SMallocs;
5853     }
5854     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5855            std::to_string(NumInvalidMallocs);
5856   }
5857 
5858   /// See AbstractAttribute::trackStatistics().
5859   void trackStatistics() const override {
5860     STATS_DECL(
5861         MallocCalls, Function,
5862         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5863     for (auto &It : AllocationInfos)
5864       if (It.second->Status != AllocationInfo::INVALID)
5865         ++BUILD_STAT_NAME(MallocCalls, Function);
5866   }
5867 
5868   bool isAssumedHeapToStack(const CallBase &CB) const override {
5869     if (isValidState())
5870       if (AllocationInfo *AI = AllocationInfos.lookup(&CB))
5871         return AI->Status != AllocationInfo::INVALID;
5872     return false;
5873   }
5874 
5875   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
5876     if (!isValidState())
5877       return false;
5878 
5879     for (auto &It : AllocationInfos) {
5880       AllocationInfo &AI = *It.second;
5881       if (AI.Status == AllocationInfo::INVALID)
5882         continue;
5883 
5884       if (AI.PotentialFreeCalls.count(&CB))
5885         return true;
5886     }
5887 
5888     return false;
5889   }
5890 
5891   ChangeStatus manifest(Attributor &A) override {
5892     assert(getState().isValidState() &&
5893            "Attempted to manifest an invalid state!");
5894 
5895     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5896     Function *F = getAnchorScope();
5897     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5898 
5899     for (auto &It : AllocationInfos) {
5900       AllocationInfo &AI = *It.second;
5901       if (AI.Status == AllocationInfo::INVALID)
5902         continue;
5903 
5904       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
5905         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5906         A.deleteAfterManifest(*FreeCall);
5907         HasChanged = ChangeStatus::CHANGED;
5908       }
5909 
5910       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
5911                         << "\n");
5912 
5913       auto Remark = [&](OptimizationRemark OR) {
5914         LibFunc IsAllocShared;
5915         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
5916           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5917             return OR << "Moving globalized variable to the stack.";
5918         return OR << "Moving memory allocation from the heap to the stack.";
5919       };
5920       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
5921         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
5922       else
5923         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
5924 
5925       Value *Size;
5926       Optional<APInt> SizeAPI = getSize(A, *this, AI);
5927       if (SizeAPI.hasValue()) {
5928         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
5929       } else if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5930         auto *Num = AI.CB->getOperand(0);
5931         auto *SizeT = AI.CB->getOperand(1);
5932         IRBuilder<> B(AI.CB);
5933         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5934       } else if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5935         Size = AI.CB->getOperand(1);
5936       } else {
5937         Size = AI.CB->getOperand(0);
5938       }
5939 
5940       Align Alignment(1);
5941       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5942         Optional<APInt> AlignmentAPI =
5943             getAPInt(A, *this, *AI.CB->getArgOperand(0));
5944         assert(AlignmentAPI.hasValue() &&
5945                "Expected an alignment during manifest!");
5946         Alignment =
5947             max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
5948       }
5949 
5950       unsigned AS = cast<PointerType>(AI.CB->getType())->getAddressSpace();
5951       Instruction *Alloca =
5952           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5953                          "", AI.CB->getNextNode());
5954 
5955       if (Alloca->getType() != AI.CB->getType())
5956         Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc",
5957                                  Alloca->getNextNode());
5958 
5959       A.changeValueAfterManifest(*AI.CB, *Alloca);
5960 
5961       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
5962         auto *NBB = II->getNormalDest();
5963         BranchInst::Create(NBB, AI.CB->getParent());
5964         A.deleteAfterManifest(*AI.CB);
5965       } else {
5966         A.deleteAfterManifest(*AI.CB);
5967       }
5968 
5969       // Zero out the allocated memory if it was a calloc.
5970       if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5971         auto *BI = new BitCastInst(Alloca, AI.CB->getType(), "calloc_bc",
5972                                    Alloca->getNextNode());
5973         Value *Ops[] = {
5974             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5975             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5976 
5977         Type *Tys[] = {BI->getType(), AI.CB->getOperand(0)->getType()};
5978         Module *M = F->getParent();
5979         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5980         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5981       }
5982       HasChanged = ChangeStatus::CHANGED;
5983     }
5984 
5985     return HasChanged;
5986   }
5987 
5988   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
5989                            Value &V) {
5990     bool UsedAssumedInformation = false;
5991     Optional<Constant *> SimpleV =
5992         A.getAssumedConstant(V, AA, UsedAssumedInformation);
5993     if (!SimpleV.hasValue())
5994       return APInt(64, 0);
5995     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
5996       return CI->getValue();
5997     return llvm::None;
5998   }
5999 
6000   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
6001                           AllocationInfo &AI) {
6002 
6003     if (AI.Kind == AllocationInfo::AllocationKind::MALLOC)
6004       return getAPInt(A, AA, *AI.CB->getArgOperand(0));
6005 
6006     if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
6007       // Only if the alignment is also constant we return a size.
6008       return getAPInt(A, AA, *AI.CB->getArgOperand(0)).hasValue()
6009                  ? getAPInt(A, AA, *AI.CB->getArgOperand(1))
6010                  : llvm::None;
6011 
6012     assert(AI.Kind == AllocationInfo::AllocationKind::CALLOC &&
6013            "Expected only callocs are left");
6014     Optional<APInt> Num = getAPInt(A, AA, *AI.CB->getArgOperand(0));
6015     Optional<APInt> Size = getAPInt(A, AA, *AI.CB->getArgOperand(1));
6016     if (!Num.hasValue() || !Size.hasValue())
6017       return llvm::None;
6018     bool Overflow = false;
6019     Size = Size.getValue().umul_ov(Num.getValue(), Overflow);
6020     return Overflow ? llvm::None : Size;
6021   }
6022 
6023   /// Collection of all malloc-like calls in a function with associated
6024   /// information.
6025   DenseMap<CallBase *, AllocationInfo *> AllocationInfos;
6026 
6027   /// Collection of all free-like calls in a function with associated
6028   /// information.
6029   DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos;
6030 
6031   ChangeStatus updateImpl(Attributor &A) override;
6032 };
6033 
6034 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6035   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6036   const Function *F = getAnchorScope();
6037 
6038   const auto &LivenessAA =
6039       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6040 
6041   MustBeExecutedContextExplorer &Explorer =
6042       A.getInfoCache().getMustBeExecutedContextExplorer();
6043 
6044   bool StackIsAccessibleByOtherThreads =
6045       A.getInfoCache().stackIsAccessibleByOtherThreads();
6046 
6047   // Flag to ensure we update our deallocation information at most once per
6048   // updateImpl call and only if we use the free check reasoning.
6049   bool HasUpdatedFrees = false;
6050 
6051   auto UpdateFrees = [&]() {
6052     HasUpdatedFrees = true;
6053 
6054     for (auto &It : DeallocationInfos) {
6055       DeallocationInfo &DI = *It.second;
6056       // For now we cannot use deallocations that have unknown inputs, skip
6057       // them.
6058       if (DI.MightFreeUnknownObjects)
6059         continue;
6060 
6061       // No need to analyze dead calls, ignore them instead.
6062       bool UsedAssumedInformation = false;
6063       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6064                           /* CheckBBLivenessOnly */ true))
6065         continue;
6066 
6067       // Use the optimistic version to get the freed objects, ignoring dead
6068       // branches etc.
6069       SmallVector<Value *, 8> Objects;
6070       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6071                                            *this, DI.CB)) {
6072         LLVM_DEBUG(
6073             dbgs()
6074             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6075         DI.MightFreeUnknownObjects = true;
6076         continue;
6077       }
6078 
6079       // Check each object explicitly.
6080       for (auto *Obj : Objects) {
6081         // Free of null and undef can be ignored as no-ops (or UB in the latter
6082         // case).
6083         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6084           continue;
6085 
6086         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6087         if (!ObjCB) {
6088           LLVM_DEBUG(dbgs()
6089                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6090           DI.MightFreeUnknownObjects = true;
6091           continue;
6092         }
6093 
6094         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6095         if (!AI) {
6096           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6097                             << "\n");
6098           DI.MightFreeUnknownObjects = true;
6099           continue;
6100         }
6101 
6102         DI.PotentialAllocationCalls.insert(ObjCB);
6103       }
6104     }
6105   };
6106 
6107   auto FreeCheck = [&](AllocationInfo &AI) {
6108     // If the stack is not accessible by other threads, the "must-free" logic
6109     // doesn't apply as the pointer could be shared and needs to be places in
6110     // "shareable" memory.
6111     if (!StackIsAccessibleByOtherThreads) {
6112       auto &NoSyncAA =
6113           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6114       if (!NoSyncAA.isAssumedNoSync()) {
6115         LLVM_DEBUG(
6116             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6117                       "other threads and function is not nosync:\n");
6118         return false;
6119       }
6120     }
6121     if (!HasUpdatedFrees)
6122       UpdateFrees();
6123 
6124     // TODO: Allow multi exit functions that have different free calls.
6125     if (AI.PotentialFreeCalls.size() != 1) {
6126       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6127                         << AI.PotentialFreeCalls.size() << "\n");
6128       return false;
6129     }
6130     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6131     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6132     if (!DI) {
6133       LLVM_DEBUG(
6134           dbgs() << "[H2S] unique free call was not known as deallocation call "
6135                  << *UniqueFree << "\n");
6136       return false;
6137     }
6138     if (DI->MightFreeUnknownObjects) {
6139       LLVM_DEBUG(
6140           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6141       return false;
6142     }
6143     if (DI->PotentialAllocationCalls.size() > 1) {
6144       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6145                         << DI->PotentialAllocationCalls.size()
6146                         << " different allocations\n");
6147       return false;
6148     }
6149     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6150       LLVM_DEBUG(
6151           dbgs()
6152           << "[H2S] unique free call not known to free this allocation but "
6153           << **DI->PotentialAllocationCalls.begin() << "\n");
6154       return false;
6155     }
6156     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6157     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6158       LLVM_DEBUG(
6159           dbgs()
6160           << "[H2S] unique free call might not be executed with the allocation "
6161           << *UniqueFree << "\n");
6162       return false;
6163     }
6164     return true;
6165   };
6166 
6167   auto UsesCheck = [&](AllocationInfo &AI) {
6168     bool ValidUsesOnly = true;
6169 
6170     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6171       Instruction *UserI = cast<Instruction>(U.getUser());
6172       if (isa<LoadInst>(UserI))
6173         return true;
6174       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6175         if (SI->getValueOperand() == U.get()) {
6176           LLVM_DEBUG(dbgs()
6177                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6178           ValidUsesOnly = false;
6179         } else {
6180           // A store into the malloc'ed memory is fine.
6181         }
6182         return true;
6183       }
6184       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6185         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6186           return true;
6187         if (DeallocationInfos.count(CB)) {
6188           AI.PotentialFreeCalls.insert(CB);
6189           return true;
6190         }
6191 
6192         unsigned ArgNo = CB->getArgOperandNo(&U);
6193 
6194         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6195             *this, IRPosition::callsite_argument(*CB, ArgNo),
6196             DepClassTy::OPTIONAL);
6197 
6198         // If a call site argument use is nofree, we are fine.
6199         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6200             *this, IRPosition::callsite_argument(*CB, ArgNo),
6201             DepClassTy::OPTIONAL);
6202 
6203         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6204         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6205         if (MaybeCaptured ||
6206             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6207              MaybeFreed)) {
6208           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6209 
6210           // Emit a missed remark if this is missed OpenMP globalization.
6211           auto Remark = [&](OptimizationRemarkMissed ORM) {
6212             return ORM
6213                    << "Could not move globalized variable to the stack. "
6214                       "Variable is potentially captured in call. Mark "
6215                       "parameter as `__attribute__((noescape))` to override.";
6216           };
6217 
6218           if (ValidUsesOnly &&
6219               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6220             A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark);
6221 
6222           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6223           ValidUsesOnly = false;
6224         }
6225         return true;
6226       }
6227 
6228       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6229           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6230         Follow = true;
6231         return true;
6232       }
6233       // Unknown user for which we can not track uses further (in a way that
6234       // makes sense).
6235       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6236       ValidUsesOnly = false;
6237       return true;
6238     };
6239     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6240       return false;
6241     return ValidUsesOnly;
6242   };
6243 
6244   // The actual update starts here. We look at all allocations and depending on
6245   // their status perform the appropriate check(s).
6246   for (auto &It : AllocationInfos) {
6247     AllocationInfo &AI = *It.second;
6248     if (AI.Status == AllocationInfo::INVALID)
6249       continue;
6250 
6251     if (MaxHeapToStackSize == -1) {
6252       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
6253         if (!getAPInt(A, *this, *AI.CB->getArgOperand(0)).hasValue()) {
6254           LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6255                             << "\n");
6256           AI.Status = AllocationInfo::INVALID;
6257           Changed = ChangeStatus::CHANGED;
6258           continue;
6259         }
6260     } else {
6261       Optional<APInt> Size = getSize(A, *this, AI);
6262       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6263         LLVM_DEBUG({
6264           if (!Size.hasValue())
6265             dbgs() << "[H2S] Unknown allocation size (or alignment): " << *AI.CB
6266                    << "\n";
6267           else
6268             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6269                    << MaxHeapToStackSize << "\n";
6270         });
6271 
6272         AI.Status = AllocationInfo::INVALID;
6273         Changed = ChangeStatus::CHANGED;
6274         continue;
6275       }
6276     }
6277 
6278     switch (AI.Status) {
6279     case AllocationInfo::STACK_DUE_TO_USE:
6280       if (UsesCheck(AI))
6281         continue;
6282       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6283       LLVM_FALLTHROUGH;
6284     case AllocationInfo::STACK_DUE_TO_FREE:
6285       if (FreeCheck(AI))
6286         continue;
6287       AI.Status = AllocationInfo::INVALID;
6288       Changed = ChangeStatus::CHANGED;
6289       continue;
6290     case AllocationInfo::INVALID:
6291       llvm_unreachable("Invalid allocations should never reach this point!");
6292     };
6293   }
6294 
6295   return Changed;
6296 }
6297 
6298 /// ----------------------- Privatizable Pointers ------------------------------
6299 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6300   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6301       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6302 
6303   ChangeStatus indicatePessimisticFixpoint() override {
6304     AAPrivatizablePtr::indicatePessimisticFixpoint();
6305     PrivatizableType = nullptr;
6306     return ChangeStatus::CHANGED;
6307   }
6308 
6309   /// Identify the type we can chose for a private copy of the underlying
6310   /// argument. None means it is not clear yet, nullptr means there is none.
6311   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6312 
6313   /// Return a privatizable type that encloses both T0 and T1.
6314   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6315   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6316     if (!T0.hasValue())
6317       return T1;
6318     if (!T1.hasValue())
6319       return T0;
6320     if (T0 == T1)
6321       return T0;
6322     return nullptr;
6323   }
6324 
6325   Optional<Type *> getPrivatizableType() const override {
6326     return PrivatizableType;
6327   }
6328 
6329   const std::string getAsStr() const override {
6330     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6331   }
6332 
6333 protected:
6334   Optional<Type *> PrivatizableType;
6335 };
6336 
6337 // TODO: Do this for call site arguments (probably also other values) as well.
6338 
6339 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6340   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6341       : AAPrivatizablePtrImpl(IRP, A) {}
6342 
6343   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6344   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6345     // If this is a byval argument and we know all the call sites (so we can
6346     // rewrite them), there is no need to check them explicitly.
6347     bool AllCallSitesKnown;
6348     if (getIRPosition().hasAttr(Attribute::ByVal) &&
6349         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6350                                true, AllCallSitesKnown))
6351       return getAssociatedValue().getType()->getPointerElementType();
6352 
6353     Optional<Type *> Ty;
6354     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6355 
6356     // Make sure the associated call site argument has the same type at all call
6357     // sites and it is an allocation we know is safe to privatize, for now that
6358     // means we only allow alloca instructions.
6359     // TODO: We can additionally analyze the accesses in the callee to  create
6360     //       the type from that information instead. That is a little more
6361     //       involved and will be done in a follow up patch.
6362     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6363       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6364       // Check if a coresponding argument was found or if it is one not
6365       // associated (which can happen for callback calls).
6366       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6367         return false;
6368 
6369       // Check that all call sites agree on a type.
6370       auto &PrivCSArgAA =
6371           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6372       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6373 
6374       LLVM_DEBUG({
6375         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6376         if (CSTy.hasValue() && CSTy.getValue())
6377           CSTy.getValue()->print(dbgs());
6378         else if (CSTy.hasValue())
6379           dbgs() << "<nullptr>";
6380         else
6381           dbgs() << "<none>";
6382       });
6383 
6384       Ty = combineTypes(Ty, CSTy);
6385 
6386       LLVM_DEBUG({
6387         dbgs() << " : New Type: ";
6388         if (Ty.hasValue() && Ty.getValue())
6389           Ty.getValue()->print(dbgs());
6390         else if (Ty.hasValue())
6391           dbgs() << "<nullptr>";
6392         else
6393           dbgs() << "<none>";
6394         dbgs() << "\n";
6395       });
6396 
6397       return !Ty.hasValue() || Ty.getValue();
6398     };
6399 
6400     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
6401       return nullptr;
6402     return Ty;
6403   }
6404 
6405   /// See AbstractAttribute::updateImpl(...).
6406   ChangeStatus updateImpl(Attributor &A) override {
6407     PrivatizableType = identifyPrivatizableType(A);
6408     if (!PrivatizableType.hasValue())
6409       return ChangeStatus::UNCHANGED;
6410     if (!PrivatizableType.getValue())
6411       return indicatePessimisticFixpoint();
6412 
6413     // The dependence is optional so we don't give up once we give up on the
6414     // alignment.
6415     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6416                         DepClassTy::OPTIONAL);
6417 
6418     // Avoid arguments with padding for now.
6419     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6420         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6421                                                 A.getInfoCache().getDL())) {
6422       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6423       return indicatePessimisticFixpoint();
6424     }
6425 
6426     // Verify callee and caller agree on how the promoted argument would be
6427     // passed.
6428     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
6429     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
6430     // which doesn't require the arguments ArgumentPromotion wanted to pass.
6431     Function &Fn = *getIRPosition().getAnchorScope();
6432     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
6433     ArgsToPromote.insert(getAssociatedArgument());
6434     const auto *TTI =
6435         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6436     if (!TTI ||
6437         !ArgumentPromotionPass::areFunctionArgsABICompatible(
6438             Fn, *TTI, ArgsToPromote, Dummy) ||
6439         ArgsToPromote.empty()) {
6440       LLVM_DEBUG(
6441           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6442                  << Fn.getName() << "\n");
6443       return indicatePessimisticFixpoint();
6444     }
6445 
6446     // Collect the types that will replace the privatizable type in the function
6447     // signature.
6448     SmallVector<Type *, 16> ReplacementTypes;
6449     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6450 
6451     // Register a rewrite of the argument.
6452     Argument *Arg = getAssociatedArgument();
6453     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6454       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6455       return indicatePessimisticFixpoint();
6456     }
6457 
6458     unsigned ArgNo = Arg->getArgNo();
6459 
6460     // Helper to check if for the given call site the associated argument is
6461     // passed to a callback where the privatization would be different.
6462     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6463       SmallVector<const Use *, 4> CallbackUses;
6464       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6465       for (const Use *U : CallbackUses) {
6466         AbstractCallSite CBACS(U);
6467         assert(CBACS && CBACS.isCallbackCall());
6468         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6469           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6470 
6471           LLVM_DEBUG({
6472             dbgs()
6473                 << "[AAPrivatizablePtr] Argument " << *Arg
6474                 << "check if can be privatized in the context of its parent ("
6475                 << Arg->getParent()->getName()
6476                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6477                    "callback ("
6478                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6479                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6480                 << CBACS.getCallArgOperand(CBArg) << " vs "
6481                 << CB.getArgOperand(ArgNo) << "\n"
6482                 << "[AAPrivatizablePtr] " << CBArg << " : "
6483                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6484           });
6485 
6486           if (CBArgNo != int(ArgNo))
6487             continue;
6488           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6489               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6490           if (CBArgPrivAA.isValidState()) {
6491             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6492             if (!CBArgPrivTy.hasValue())
6493               continue;
6494             if (CBArgPrivTy.getValue() == PrivatizableType)
6495               continue;
6496           }
6497 
6498           LLVM_DEBUG({
6499             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6500                    << " cannot be privatized in the context of its parent ("
6501                    << Arg->getParent()->getName()
6502                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6503                       "callback ("
6504                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6505                    << ").\n[AAPrivatizablePtr] for which the argument "
6506                       "privatization is not compatible.\n";
6507           });
6508           return false;
6509         }
6510       }
6511       return true;
6512     };
6513 
6514     // Helper to check if for the given call site the associated argument is
6515     // passed to a direct call where the privatization would be different.
6516     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6517       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6518       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6519       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
6520              "Expected a direct call operand for callback call operand");
6521 
6522       LLVM_DEBUG({
6523         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6524                << " check if be privatized in the context of its parent ("
6525                << Arg->getParent()->getName()
6526                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6527                   "direct call of ("
6528                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6529                << ").\n";
6530       });
6531 
6532       Function *DCCallee = DC->getCalledFunction();
6533       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6534         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6535             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6536             DepClassTy::REQUIRED);
6537         if (DCArgPrivAA.isValidState()) {
6538           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6539           if (!DCArgPrivTy.hasValue())
6540             return true;
6541           if (DCArgPrivTy.getValue() == PrivatizableType)
6542             return true;
6543         }
6544       }
6545 
6546       LLVM_DEBUG({
6547         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6548                << " cannot be privatized in the context of its parent ("
6549                << Arg->getParent()->getName()
6550                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6551                   "direct call of ("
6552                << ACS.getInstruction()->getCalledFunction()->getName()
6553                << ").\n[AAPrivatizablePtr] for which the argument "
6554                   "privatization is not compatible.\n";
6555       });
6556       return false;
6557     };
6558 
6559     // Helper to check if the associated argument is used at the given abstract
6560     // call site in a way that is incompatible with the privatization assumed
6561     // here.
6562     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6563       if (ACS.isDirectCall())
6564         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6565       if (ACS.isCallbackCall())
6566         return IsCompatiblePrivArgOfDirectCS(ACS);
6567       return false;
6568     };
6569 
6570     bool AllCallSitesKnown;
6571     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6572                                 AllCallSitesKnown))
6573       return indicatePessimisticFixpoint();
6574 
6575     return ChangeStatus::UNCHANGED;
6576   }
6577 
6578   /// Given a type to private \p PrivType, collect the constituates (which are
6579   /// used) in \p ReplacementTypes.
6580   static void
6581   identifyReplacementTypes(Type *PrivType,
6582                            SmallVectorImpl<Type *> &ReplacementTypes) {
6583     // TODO: For now we expand the privatization type to the fullest which can
6584     //       lead to dead arguments that need to be removed later.
6585     assert(PrivType && "Expected privatizable type!");
6586 
6587     // Traverse the type, extract constituate types on the outermost level.
6588     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6589       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6590         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6591     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6592       ReplacementTypes.append(PrivArrayType->getNumElements(),
6593                               PrivArrayType->getElementType());
6594     } else {
6595       ReplacementTypes.push_back(PrivType);
6596     }
6597   }
6598 
6599   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6600   /// The values needed are taken from the arguments of \p F starting at
6601   /// position \p ArgNo.
6602   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6603                                    unsigned ArgNo, Instruction &IP) {
6604     assert(PrivType && "Expected privatizable type!");
6605 
6606     IRBuilder<NoFolder> IRB(&IP);
6607     const DataLayout &DL = F.getParent()->getDataLayout();
6608 
6609     // Traverse the type, build GEPs and stores.
6610     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6611       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6612       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6613         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6614         Value *Ptr =
6615             constructPointer(PointeeTy, PrivType, &Base,
6616                              PrivStructLayout->getElementOffset(u), IRB, DL);
6617         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6618       }
6619     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6620       Type *PointeeTy = PrivArrayType->getElementType();
6621       Type *PointeePtrTy = PointeeTy->getPointerTo();
6622       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6623       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6624         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6625                                       u * PointeeTySize, IRB, DL);
6626         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6627       }
6628     } else {
6629       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6630     }
6631   }
6632 
6633   /// Extract values from \p Base according to the type \p PrivType at the
6634   /// call position \p ACS. The values are appended to \p ReplacementValues.
6635   void createReplacementValues(Align Alignment, Type *PrivType,
6636                                AbstractCallSite ACS, Value *Base,
6637                                SmallVectorImpl<Value *> &ReplacementValues) {
6638     assert(Base && "Expected base value!");
6639     assert(PrivType && "Expected privatizable type!");
6640     Instruction *IP = ACS.getInstruction();
6641 
6642     IRBuilder<NoFolder> IRB(IP);
6643     const DataLayout &DL = IP->getModule()->getDataLayout();
6644 
6645     if (Base->getType()->getPointerElementType() != PrivType)
6646       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
6647                                                  "", ACS.getInstruction());
6648 
6649     // Traverse the type, build GEPs and loads.
6650     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6651       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6652       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6653         Type *PointeeTy = PrivStructType->getElementType(u);
6654         Value *Ptr =
6655             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6656                              PrivStructLayout->getElementOffset(u), IRB, DL);
6657         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6658         L->setAlignment(Alignment);
6659         ReplacementValues.push_back(L);
6660       }
6661     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6662       Type *PointeeTy = PrivArrayType->getElementType();
6663       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6664       Type *PointeePtrTy = PointeeTy->getPointerTo();
6665       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6666         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6667                                       u * PointeeTySize, IRB, DL);
6668         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6669         L->setAlignment(Alignment);
6670         ReplacementValues.push_back(L);
6671       }
6672     } else {
6673       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6674       L->setAlignment(Alignment);
6675       ReplacementValues.push_back(L);
6676     }
6677   }
6678 
6679   /// See AbstractAttribute::manifest(...)
6680   ChangeStatus manifest(Attributor &A) override {
6681     if (!PrivatizableType.hasValue())
6682       return ChangeStatus::UNCHANGED;
6683     assert(PrivatizableType.getValue() && "Expected privatizable type!");
6684 
6685     // Collect all tail calls in the function as we cannot allow new allocas to
6686     // escape into tail recursion.
6687     // TODO: Be smarter about new allocas escaping into tail calls.
6688     SmallVector<CallInst *, 16> TailCalls;
6689     bool UsedAssumedInformation = false;
6690     if (!A.checkForAllInstructions(
6691             [&](Instruction &I) {
6692               CallInst &CI = cast<CallInst>(I);
6693               if (CI.isTailCall())
6694                 TailCalls.push_back(&CI);
6695               return true;
6696             },
6697             *this, {Instruction::Call}, UsedAssumedInformation))
6698       return ChangeStatus::UNCHANGED;
6699 
6700     Argument *Arg = getAssociatedArgument();
6701     // Query AAAlign attribute for alignment of associated argument to
6702     // determine the best alignment of loads.
6703     const auto &AlignAA =
6704         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6705 
6706     // Callback to repair the associated function. A new alloca is placed at the
6707     // beginning and initialized with the values passed through arguments. The
6708     // new alloca replaces the use of the old pointer argument.
6709     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6710         [=](const Attributor::ArgumentReplacementInfo &ARI,
6711             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6712           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6713           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6714           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
6715                                            Arg->getName() + ".priv", IP);
6716           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
6717                                ArgIt->getArgNo(), *IP);
6718 
6719           if (AI->getType() != Arg->getType())
6720             AI =
6721                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
6722           Arg->replaceAllUsesWith(AI);
6723 
6724           for (CallInst *CI : TailCalls)
6725             CI->setTailCall(false);
6726         };
6727 
6728     // Callback to repair a call site of the associated function. The elements
6729     // of the privatizable type are loaded prior to the call and passed to the
6730     // new function version.
6731     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6732         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6733                       AbstractCallSite ACS,
6734                       SmallVectorImpl<Value *> &NewArgOperands) {
6735           // When no alignment is specified for the load instruction,
6736           // natural alignment is assumed.
6737           createReplacementValues(
6738               assumeAligned(AlignAA.getAssumedAlign()),
6739               PrivatizableType.getValue(), ACS,
6740               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6741               NewArgOperands);
6742         };
6743 
6744     // Collect the types that will replace the privatizable type in the function
6745     // signature.
6746     SmallVector<Type *, 16> ReplacementTypes;
6747     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6748 
6749     // Register a rewrite of the argument.
6750     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6751                                            std::move(FnRepairCB),
6752                                            std::move(ACSRepairCB)))
6753       return ChangeStatus::CHANGED;
6754     return ChangeStatus::UNCHANGED;
6755   }
6756 
6757   /// See AbstractAttribute::trackStatistics()
6758   void trackStatistics() const override {
6759     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6760   }
6761 };
6762 
6763 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6764   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6765       : AAPrivatizablePtrImpl(IRP, A) {}
6766 
6767   /// See AbstractAttribute::initialize(...).
6768   virtual void initialize(Attributor &A) override {
6769     // TODO: We can privatize more than arguments.
6770     indicatePessimisticFixpoint();
6771   }
6772 
6773   ChangeStatus updateImpl(Attributor &A) override {
6774     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6775                      "updateImpl will not be called");
6776   }
6777 
6778   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6779   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6780     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6781     if (!Obj) {
6782       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6783       return nullptr;
6784     }
6785 
6786     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6787       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6788         if (CI->isOne())
6789           return Obj->getType()->getPointerElementType();
6790     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6791       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6792           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6793       if (PrivArgAA.isAssumedPrivatizablePtr())
6794         return Obj->getType()->getPointerElementType();
6795     }
6796 
6797     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6798                          "alloca nor privatizable argument: "
6799                       << *Obj << "!\n");
6800     return nullptr;
6801   }
6802 
6803   /// See AbstractAttribute::trackStatistics()
6804   void trackStatistics() const override {
6805     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6806   }
6807 };
6808 
6809 struct AAPrivatizablePtrCallSiteArgument final
6810     : public AAPrivatizablePtrFloating {
6811   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6812       : AAPrivatizablePtrFloating(IRP, A) {}
6813 
6814   /// See AbstractAttribute::initialize(...).
6815   void initialize(Attributor &A) override {
6816     if (getIRPosition().hasAttr(Attribute::ByVal))
6817       indicateOptimisticFixpoint();
6818   }
6819 
6820   /// See AbstractAttribute::updateImpl(...).
6821   ChangeStatus updateImpl(Attributor &A) override {
6822     PrivatizableType = identifyPrivatizableType(A);
6823     if (!PrivatizableType.hasValue())
6824       return ChangeStatus::UNCHANGED;
6825     if (!PrivatizableType.getValue())
6826       return indicatePessimisticFixpoint();
6827 
6828     const IRPosition &IRP = getIRPosition();
6829     auto &NoCaptureAA =
6830         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6831     if (!NoCaptureAA.isAssumedNoCapture()) {
6832       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6833       return indicatePessimisticFixpoint();
6834     }
6835 
6836     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6837     if (!NoAliasAA.isAssumedNoAlias()) {
6838       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6839       return indicatePessimisticFixpoint();
6840     }
6841 
6842     const auto &MemBehaviorAA =
6843         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
6844     if (!MemBehaviorAA.isAssumedReadOnly()) {
6845       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6846       return indicatePessimisticFixpoint();
6847     }
6848 
6849     return ChangeStatus::UNCHANGED;
6850   }
6851 
6852   /// See AbstractAttribute::trackStatistics()
6853   void trackStatistics() const override {
6854     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6855   }
6856 };
6857 
6858 struct AAPrivatizablePtrCallSiteReturned final
6859     : public AAPrivatizablePtrFloating {
6860   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6861       : AAPrivatizablePtrFloating(IRP, A) {}
6862 
6863   /// See AbstractAttribute::initialize(...).
6864   void initialize(Attributor &A) override {
6865     // TODO: We can privatize more than arguments.
6866     indicatePessimisticFixpoint();
6867   }
6868 
6869   /// See AbstractAttribute::trackStatistics()
6870   void trackStatistics() const override {
6871     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
6872   }
6873 };
6874 
6875 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
6876   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
6877       : AAPrivatizablePtrFloating(IRP, A) {}
6878 
6879   /// See AbstractAttribute::initialize(...).
6880   void initialize(Attributor &A) override {
6881     // TODO: We can privatize more than arguments.
6882     indicatePessimisticFixpoint();
6883   }
6884 
6885   /// See AbstractAttribute::trackStatistics()
6886   void trackStatistics() const override {
6887     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
6888   }
6889 };
6890 
6891 /// -------------------- Memory Behavior Attributes ----------------------------
6892 /// Includes read-none, read-only, and write-only.
6893 /// ----------------------------------------------------------------------------
6894 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
6895   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
6896       : AAMemoryBehavior(IRP, A) {}
6897 
6898   /// See AbstractAttribute::initialize(...).
6899   void initialize(Attributor &A) override {
6900     intersectAssumedBits(BEST_STATE);
6901     getKnownStateFromValue(getIRPosition(), getState());
6902     AAMemoryBehavior::initialize(A);
6903   }
6904 
6905   /// Return the memory behavior information encoded in the IR for \p IRP.
6906   static void getKnownStateFromValue(const IRPosition &IRP,
6907                                      BitIntegerState &State,
6908                                      bool IgnoreSubsumingPositions = false) {
6909     SmallVector<Attribute, 2> Attrs;
6910     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6911     for (const Attribute &Attr : Attrs) {
6912       switch (Attr.getKindAsEnum()) {
6913       case Attribute::ReadNone:
6914         State.addKnownBits(NO_ACCESSES);
6915         break;
6916       case Attribute::ReadOnly:
6917         State.addKnownBits(NO_WRITES);
6918         break;
6919       case Attribute::WriteOnly:
6920         State.addKnownBits(NO_READS);
6921         break;
6922       default:
6923         llvm_unreachable("Unexpected attribute!");
6924       }
6925     }
6926 
6927     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
6928       if (!I->mayReadFromMemory())
6929         State.addKnownBits(NO_READS);
6930       if (!I->mayWriteToMemory())
6931         State.addKnownBits(NO_WRITES);
6932     }
6933   }
6934 
6935   /// See AbstractAttribute::getDeducedAttributes(...).
6936   void getDeducedAttributes(LLVMContext &Ctx,
6937                             SmallVectorImpl<Attribute> &Attrs) const override {
6938     assert(Attrs.size() == 0);
6939     if (isAssumedReadNone())
6940       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6941     else if (isAssumedReadOnly())
6942       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
6943     else if (isAssumedWriteOnly())
6944       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
6945     assert(Attrs.size() <= 1);
6946   }
6947 
6948   /// See AbstractAttribute::manifest(...).
6949   ChangeStatus manifest(Attributor &A) override {
6950     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
6951       return ChangeStatus::UNCHANGED;
6952 
6953     const IRPosition &IRP = getIRPosition();
6954 
6955     // Check if we would improve the existing attributes first.
6956     SmallVector<Attribute, 4> DeducedAttrs;
6957     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6958     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6959           return IRP.hasAttr(Attr.getKindAsEnum(),
6960                              /* IgnoreSubsumingPositions */ true);
6961         }))
6962       return ChangeStatus::UNCHANGED;
6963 
6964     // Clear existing attributes.
6965     IRP.removeAttrs(AttrKinds);
6966 
6967     // Use the generic manifest method.
6968     return IRAttribute::manifest(A);
6969   }
6970 
6971   /// See AbstractState::getAsStr().
6972   const std::string getAsStr() const override {
6973     if (isAssumedReadNone())
6974       return "readnone";
6975     if (isAssumedReadOnly())
6976       return "readonly";
6977     if (isAssumedWriteOnly())
6978       return "writeonly";
6979     return "may-read/write";
6980   }
6981 
6982   /// The set of IR attributes AAMemoryBehavior deals with.
6983   static const Attribute::AttrKind AttrKinds[3];
6984 };
6985 
6986 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
6987     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
6988 
6989 /// Memory behavior attribute for a floating value.
6990 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
6991   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
6992       : AAMemoryBehaviorImpl(IRP, A) {}
6993 
6994   /// See AbstractAttribute::updateImpl(...).
6995   ChangeStatus updateImpl(Attributor &A) override;
6996 
6997   /// See AbstractAttribute::trackStatistics()
6998   void trackStatistics() const override {
6999     if (isAssumedReadNone())
7000       STATS_DECLTRACK_FLOATING_ATTR(readnone)
7001     else if (isAssumedReadOnly())
7002       STATS_DECLTRACK_FLOATING_ATTR(readonly)
7003     else if (isAssumedWriteOnly())
7004       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
7005   }
7006 
7007 private:
7008   /// Return true if users of \p UserI might access the underlying
7009   /// variable/location described by \p U and should therefore be analyzed.
7010   bool followUsersOfUseIn(Attributor &A, const Use &U,
7011                           const Instruction *UserI);
7012 
7013   /// Update the state according to the effect of use \p U in \p UserI.
7014   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
7015 };
7016 
7017 /// Memory behavior attribute for function argument.
7018 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7019   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7020       : AAMemoryBehaviorFloating(IRP, A) {}
7021 
7022   /// See AbstractAttribute::initialize(...).
7023   void initialize(Attributor &A) override {
7024     intersectAssumedBits(BEST_STATE);
7025     const IRPosition &IRP = getIRPosition();
7026     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7027     // can query it when we use has/getAttr. That would allow us to reuse the
7028     // initialize of the base class here.
7029     bool HasByVal =
7030         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7031     getKnownStateFromValue(IRP, getState(),
7032                            /* IgnoreSubsumingPositions */ HasByVal);
7033 
7034     // Initialize the use vector with all direct uses of the associated value.
7035     Argument *Arg = getAssociatedArgument();
7036     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7037       indicatePessimisticFixpoint();
7038   }
7039 
7040   ChangeStatus manifest(Attributor &A) override {
7041     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7042     if (!getAssociatedValue().getType()->isPointerTy())
7043       return ChangeStatus::UNCHANGED;
7044 
7045     // TODO: From readattrs.ll: "inalloca parameters are always
7046     //                           considered written"
7047     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7048       removeKnownBits(NO_WRITES);
7049       removeAssumedBits(NO_WRITES);
7050     }
7051     return AAMemoryBehaviorFloating::manifest(A);
7052   }
7053 
7054   /// See AbstractAttribute::trackStatistics()
7055   void trackStatistics() const override {
7056     if (isAssumedReadNone())
7057       STATS_DECLTRACK_ARG_ATTR(readnone)
7058     else if (isAssumedReadOnly())
7059       STATS_DECLTRACK_ARG_ATTR(readonly)
7060     else if (isAssumedWriteOnly())
7061       STATS_DECLTRACK_ARG_ATTR(writeonly)
7062   }
7063 };
7064 
7065 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7066   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7067       : AAMemoryBehaviorArgument(IRP, A) {}
7068 
7069   /// See AbstractAttribute::initialize(...).
7070   void initialize(Attributor &A) override {
7071     // If we don't have an associated attribute this is either a variadic call
7072     // or an indirect call, either way, nothing to do here.
7073     Argument *Arg = getAssociatedArgument();
7074     if (!Arg) {
7075       indicatePessimisticFixpoint();
7076       return;
7077     }
7078     if (Arg->hasByValAttr()) {
7079       addKnownBits(NO_WRITES);
7080       removeKnownBits(NO_READS);
7081       removeAssumedBits(NO_READS);
7082     }
7083     AAMemoryBehaviorArgument::initialize(A);
7084     if (getAssociatedFunction()->isDeclaration())
7085       indicatePessimisticFixpoint();
7086   }
7087 
7088   /// See AbstractAttribute::updateImpl(...).
7089   ChangeStatus updateImpl(Attributor &A) override {
7090     // TODO: Once we have call site specific value information we can provide
7091     //       call site specific liveness liveness information and then it makes
7092     //       sense to specialize attributes for call sites arguments instead of
7093     //       redirecting requests to the callee argument.
7094     Argument *Arg = getAssociatedArgument();
7095     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7096     auto &ArgAA =
7097         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7098     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7099   }
7100 
7101   /// See AbstractAttribute::trackStatistics()
7102   void trackStatistics() const override {
7103     if (isAssumedReadNone())
7104       STATS_DECLTRACK_CSARG_ATTR(readnone)
7105     else if (isAssumedReadOnly())
7106       STATS_DECLTRACK_CSARG_ATTR(readonly)
7107     else if (isAssumedWriteOnly())
7108       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7109   }
7110 };
7111 
7112 /// Memory behavior attribute for a call site return position.
7113 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7114   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7115       : AAMemoryBehaviorFloating(IRP, A) {}
7116 
7117   /// See AbstractAttribute::initialize(...).
7118   void initialize(Attributor &A) override {
7119     AAMemoryBehaviorImpl::initialize(A);
7120     Function *F = getAssociatedFunction();
7121     if (!F || F->isDeclaration())
7122       indicatePessimisticFixpoint();
7123   }
7124 
7125   /// See AbstractAttribute::manifest(...).
7126   ChangeStatus manifest(Attributor &A) override {
7127     // We do not annotate returned values.
7128     return ChangeStatus::UNCHANGED;
7129   }
7130 
7131   /// See AbstractAttribute::trackStatistics()
7132   void trackStatistics() const override {}
7133 };
7134 
7135 /// An AA to represent the memory behavior function attributes.
7136 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7137   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7138       : AAMemoryBehaviorImpl(IRP, A) {}
7139 
7140   /// See AbstractAttribute::updateImpl(Attributor &A).
7141   virtual ChangeStatus updateImpl(Attributor &A) override;
7142 
7143   /// See AbstractAttribute::manifest(...).
7144   ChangeStatus manifest(Attributor &A) override {
7145     Function &F = cast<Function>(getAnchorValue());
7146     if (isAssumedReadNone()) {
7147       F.removeFnAttr(Attribute::ArgMemOnly);
7148       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7149       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7150     }
7151     return AAMemoryBehaviorImpl::manifest(A);
7152   }
7153 
7154   /// See AbstractAttribute::trackStatistics()
7155   void trackStatistics() const override {
7156     if (isAssumedReadNone())
7157       STATS_DECLTRACK_FN_ATTR(readnone)
7158     else if (isAssumedReadOnly())
7159       STATS_DECLTRACK_FN_ATTR(readonly)
7160     else if (isAssumedWriteOnly())
7161       STATS_DECLTRACK_FN_ATTR(writeonly)
7162   }
7163 };
7164 
7165 /// AAMemoryBehavior attribute for call sites.
7166 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7167   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7168       : AAMemoryBehaviorImpl(IRP, A) {}
7169 
7170   /// See AbstractAttribute::initialize(...).
7171   void initialize(Attributor &A) override {
7172     AAMemoryBehaviorImpl::initialize(A);
7173     Function *F = getAssociatedFunction();
7174     if (!F || F->isDeclaration())
7175       indicatePessimisticFixpoint();
7176   }
7177 
7178   /// See AbstractAttribute::updateImpl(...).
7179   ChangeStatus updateImpl(Attributor &A) override {
7180     // TODO: Once we have call site specific value information we can provide
7181     //       call site specific liveness liveness information and then it makes
7182     //       sense to specialize attributes for call sites arguments instead of
7183     //       redirecting requests to the callee argument.
7184     Function *F = getAssociatedFunction();
7185     const IRPosition &FnPos = IRPosition::function(*F);
7186     auto &FnAA =
7187         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7188     return clampStateAndIndicateChange(getState(), FnAA.getState());
7189   }
7190 
7191   /// See AbstractAttribute::trackStatistics()
7192   void trackStatistics() const override {
7193     if (isAssumedReadNone())
7194       STATS_DECLTRACK_CS_ATTR(readnone)
7195     else if (isAssumedReadOnly())
7196       STATS_DECLTRACK_CS_ATTR(readonly)
7197     else if (isAssumedWriteOnly())
7198       STATS_DECLTRACK_CS_ATTR(writeonly)
7199   }
7200 };
7201 
7202 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7203 
7204   // The current assumed state used to determine a change.
7205   auto AssumedState = getAssumed();
7206 
7207   auto CheckRWInst = [&](Instruction &I) {
7208     // If the instruction has an own memory behavior state, use it to restrict
7209     // the local state. No further analysis is required as the other memory
7210     // state is as optimistic as it gets.
7211     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7212       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7213           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7214       intersectAssumedBits(MemBehaviorAA.getAssumed());
7215       return !isAtFixpoint();
7216     }
7217 
7218     // Remove access kind modifiers if necessary.
7219     if (I.mayReadFromMemory())
7220       removeAssumedBits(NO_READS);
7221     if (I.mayWriteToMemory())
7222       removeAssumedBits(NO_WRITES);
7223     return !isAtFixpoint();
7224   };
7225 
7226   bool UsedAssumedInformation = false;
7227   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7228                                           UsedAssumedInformation))
7229     return indicatePessimisticFixpoint();
7230 
7231   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7232                                         : ChangeStatus::UNCHANGED;
7233 }
7234 
7235 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7236 
7237   const IRPosition &IRP = getIRPosition();
7238   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7239   AAMemoryBehavior::StateType &S = getState();
7240 
7241   // First, check the function scope. We take the known information and we avoid
7242   // work if the assumed information implies the current assumed information for
7243   // this attribute. This is a valid for all but byval arguments.
7244   Argument *Arg = IRP.getAssociatedArgument();
7245   AAMemoryBehavior::base_t FnMemAssumedState =
7246       AAMemoryBehavior::StateType::getWorstState();
7247   if (!Arg || !Arg->hasByValAttr()) {
7248     const auto &FnMemAA =
7249         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7250     FnMemAssumedState = FnMemAA.getAssumed();
7251     S.addKnownBits(FnMemAA.getKnown());
7252     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7253       return ChangeStatus::UNCHANGED;
7254   }
7255 
7256   // The current assumed state used to determine a change.
7257   auto AssumedState = S.getAssumed();
7258 
7259   // Make sure the value is not captured (except through "return"), if
7260   // it is, any information derived would be irrelevant anyway as we cannot
7261   // check the potential aliases introduced by the capture. However, no need
7262   // to fall back to anythign less optimistic than the function state.
7263   const auto &ArgNoCaptureAA =
7264       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7265   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7266     S.intersectAssumedBits(FnMemAssumedState);
7267     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7268                                           : ChangeStatus::UNCHANGED;
7269   }
7270 
7271   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7272   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7273     Instruction *UserI = cast<Instruction>(U.getUser());
7274     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7275                       << " \n");
7276 
7277     // Droppable users, e.g., llvm::assume does not actually perform any action.
7278     if (UserI->isDroppable())
7279       return true;
7280 
7281     // Check if the users of UserI should also be visited.
7282     Follow = followUsersOfUseIn(A, U, UserI);
7283 
7284     // If UserI might touch memory we analyze the use in detail.
7285     if (UserI->mayReadOrWriteMemory())
7286       analyzeUseIn(A, U, UserI);
7287 
7288     return !isAtFixpoint();
7289   };
7290 
7291   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7292     return indicatePessimisticFixpoint();
7293 
7294   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7295                                         : ChangeStatus::UNCHANGED;
7296 }
7297 
7298 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7299                                                   const Instruction *UserI) {
7300   // The loaded value is unrelated to the pointer argument, no need to
7301   // follow the users of the load.
7302   if (isa<LoadInst>(UserI))
7303     return false;
7304 
7305   // By default we follow all uses assuming UserI might leak information on U,
7306   // we have special handling for call sites operands though.
7307   const auto *CB = dyn_cast<CallBase>(UserI);
7308   if (!CB || !CB->isArgOperand(&U))
7309     return true;
7310 
7311   // If the use is a call argument known not to be captured, the users of
7312   // the call do not need to be visited because they have to be unrelated to
7313   // the input. Note that this check is not trivial even though we disallow
7314   // general capturing of the underlying argument. The reason is that the
7315   // call might the argument "through return", which we allow and for which we
7316   // need to check call users.
7317   if (U.get()->getType()->isPointerTy()) {
7318     unsigned ArgNo = CB->getArgOperandNo(&U);
7319     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7320         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7321     return !ArgNoCaptureAA.isAssumedNoCapture();
7322   }
7323 
7324   return true;
7325 }
7326 
7327 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7328                                             const Instruction *UserI) {
7329   assert(UserI->mayReadOrWriteMemory());
7330 
7331   switch (UserI->getOpcode()) {
7332   default:
7333     // TODO: Handle all atomics and other side-effect operations we know of.
7334     break;
7335   case Instruction::Load:
7336     // Loads cause the NO_READS property to disappear.
7337     removeAssumedBits(NO_READS);
7338     return;
7339 
7340   case Instruction::Store:
7341     // Stores cause the NO_WRITES property to disappear if the use is the
7342     // pointer operand. Note that we do assume that capturing was taken care of
7343     // somewhere else.
7344     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7345       removeAssumedBits(NO_WRITES);
7346     return;
7347 
7348   case Instruction::Call:
7349   case Instruction::CallBr:
7350   case Instruction::Invoke: {
7351     // For call sites we look at the argument memory behavior attribute (this
7352     // could be recursive!) in order to restrict our own state.
7353     const auto *CB = cast<CallBase>(UserI);
7354 
7355     // Give up on operand bundles.
7356     if (CB->isBundleOperand(&U)) {
7357       indicatePessimisticFixpoint();
7358       return;
7359     }
7360 
7361     // Calling a function does read the function pointer, maybe write it if the
7362     // function is self-modifying.
7363     if (CB->isCallee(&U)) {
7364       removeAssumedBits(NO_READS);
7365       break;
7366     }
7367 
7368     // Adjust the possible access behavior based on the information on the
7369     // argument.
7370     IRPosition Pos;
7371     if (U.get()->getType()->isPointerTy())
7372       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7373     else
7374       Pos = IRPosition::callsite_function(*CB);
7375     const auto &MemBehaviorAA =
7376         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7377     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7378     // and at least "known".
7379     intersectAssumedBits(MemBehaviorAA.getAssumed());
7380     return;
7381   }
7382   };
7383 
7384   // Generally, look at the "may-properties" and adjust the assumed state if we
7385   // did not trigger special handling before.
7386   if (UserI->mayReadFromMemory())
7387     removeAssumedBits(NO_READS);
7388   if (UserI->mayWriteToMemory())
7389     removeAssumedBits(NO_WRITES);
7390 }
7391 
7392 /// -------------------- Memory Locations Attributes ---------------------------
7393 /// Includes read-none, argmemonly, inaccessiblememonly,
7394 /// inaccessiblememorargmemonly
7395 /// ----------------------------------------------------------------------------
7396 
7397 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7398     AAMemoryLocation::MemoryLocationsKind MLK) {
7399   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7400     return "all memory";
7401   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7402     return "no memory";
7403   std::string S = "memory:";
7404   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7405     S += "stack,";
7406   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7407     S += "constant,";
7408   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7409     S += "internal global,";
7410   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7411     S += "external global,";
7412   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7413     S += "argument,";
7414   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7415     S += "inaccessible,";
7416   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7417     S += "malloced,";
7418   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7419     S += "unknown,";
7420   S.pop_back();
7421   return S;
7422 }
7423 
7424 namespace {
7425 struct AAMemoryLocationImpl : public AAMemoryLocation {
7426 
7427   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7428       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7429     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7430       AccessKind2Accesses[u] = nullptr;
7431   }
7432 
7433   ~AAMemoryLocationImpl() {
7434     // The AccessSets are allocated via a BumpPtrAllocator, we call
7435     // the destructor manually.
7436     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7437       if (AccessKind2Accesses[u])
7438         AccessKind2Accesses[u]->~AccessSet();
7439   }
7440 
7441   /// See AbstractAttribute::initialize(...).
7442   void initialize(Attributor &A) override {
7443     intersectAssumedBits(BEST_STATE);
7444     getKnownStateFromValue(A, getIRPosition(), getState());
7445     AAMemoryLocation::initialize(A);
7446   }
7447 
7448   /// Return the memory behavior information encoded in the IR for \p IRP.
7449   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7450                                      BitIntegerState &State,
7451                                      bool IgnoreSubsumingPositions = false) {
7452     // For internal functions we ignore `argmemonly` and
7453     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7454     // constant propagation. It is unclear if this is the best way but it is
7455     // unlikely this will cause real performance problems. If we are deriving
7456     // attributes for the anchor function we even remove the attribute in
7457     // addition to ignoring it.
7458     bool UseArgMemOnly = true;
7459     Function *AnchorFn = IRP.getAnchorScope();
7460     if (AnchorFn && A.isRunOn(*AnchorFn))
7461       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7462 
7463     SmallVector<Attribute, 2> Attrs;
7464     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7465     for (const Attribute &Attr : Attrs) {
7466       switch (Attr.getKindAsEnum()) {
7467       case Attribute::ReadNone:
7468         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7469         break;
7470       case Attribute::InaccessibleMemOnly:
7471         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7472         break;
7473       case Attribute::ArgMemOnly:
7474         if (UseArgMemOnly)
7475           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7476         else
7477           IRP.removeAttrs({Attribute::ArgMemOnly});
7478         break;
7479       case Attribute::InaccessibleMemOrArgMemOnly:
7480         if (UseArgMemOnly)
7481           State.addKnownBits(inverseLocation(
7482               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7483         else
7484           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7485         break;
7486       default:
7487         llvm_unreachable("Unexpected attribute!");
7488       }
7489     }
7490   }
7491 
7492   /// See AbstractAttribute::getDeducedAttributes(...).
7493   void getDeducedAttributes(LLVMContext &Ctx,
7494                             SmallVectorImpl<Attribute> &Attrs) const override {
7495     assert(Attrs.size() == 0);
7496     if (isAssumedReadNone()) {
7497       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7498     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7499       if (isAssumedInaccessibleMemOnly())
7500         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7501       else if (isAssumedArgMemOnly())
7502         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7503       else if (isAssumedInaccessibleOrArgMemOnly())
7504         Attrs.push_back(
7505             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7506     }
7507     assert(Attrs.size() <= 1);
7508   }
7509 
7510   /// See AbstractAttribute::manifest(...).
7511   ChangeStatus manifest(Attributor &A) override {
7512     const IRPosition &IRP = getIRPosition();
7513 
7514     // Check if we would improve the existing attributes first.
7515     SmallVector<Attribute, 4> DeducedAttrs;
7516     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7517     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7518           return IRP.hasAttr(Attr.getKindAsEnum(),
7519                              /* IgnoreSubsumingPositions */ true);
7520         }))
7521       return ChangeStatus::UNCHANGED;
7522 
7523     // Clear existing attributes.
7524     IRP.removeAttrs(AttrKinds);
7525     if (isAssumedReadNone())
7526       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7527 
7528     // Use the generic manifest method.
7529     return IRAttribute::manifest(A);
7530   }
7531 
7532   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7533   bool checkForAllAccessesToMemoryKind(
7534       function_ref<bool(const Instruction *, const Value *, AccessKind,
7535                         MemoryLocationsKind)>
7536           Pred,
7537       MemoryLocationsKind RequestedMLK) const override {
7538     if (!isValidState())
7539       return false;
7540 
7541     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7542     if (AssumedMLK == NO_LOCATIONS)
7543       return true;
7544 
7545     unsigned Idx = 0;
7546     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7547          CurMLK *= 2, ++Idx) {
7548       if (CurMLK & RequestedMLK)
7549         continue;
7550 
7551       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7552         for (const AccessInfo &AI : *Accesses)
7553           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7554             return false;
7555     }
7556 
7557     return true;
7558   }
7559 
7560   ChangeStatus indicatePessimisticFixpoint() override {
7561     // If we give up and indicate a pessimistic fixpoint this instruction will
7562     // become an access for all potential access kinds:
7563     // TODO: Add pointers for argmemonly and globals to improve the results of
7564     //       checkForAllAccessesToMemoryKind.
7565     bool Changed = false;
7566     MemoryLocationsKind KnownMLK = getKnown();
7567     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7568     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7569       if (!(CurMLK & KnownMLK))
7570         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7571                                   getAccessKindFromInst(I));
7572     return AAMemoryLocation::indicatePessimisticFixpoint();
7573   }
7574 
7575 protected:
7576   /// Helper struct to tie together an instruction that has a read or write
7577   /// effect with the pointer it accesses (if any).
7578   struct AccessInfo {
7579 
7580     /// The instruction that caused the access.
7581     const Instruction *I;
7582 
7583     /// The base pointer that is accessed, or null if unknown.
7584     const Value *Ptr;
7585 
7586     /// The kind of access (read/write/read+write).
7587     AccessKind Kind;
7588 
7589     bool operator==(const AccessInfo &RHS) const {
7590       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7591     }
7592     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7593       if (LHS.I != RHS.I)
7594         return LHS.I < RHS.I;
7595       if (LHS.Ptr != RHS.Ptr)
7596         return LHS.Ptr < RHS.Ptr;
7597       if (LHS.Kind != RHS.Kind)
7598         return LHS.Kind < RHS.Kind;
7599       return false;
7600     }
7601   };
7602 
7603   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7604   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7605   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7606   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7607 
7608   /// Categorize the pointer arguments of CB that might access memory in
7609   /// AccessedLoc and update the state and access map accordingly.
7610   void
7611   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7612                                      AAMemoryLocation::StateType &AccessedLocs,
7613                                      bool &Changed);
7614 
7615   /// Return the kind(s) of location that may be accessed by \p V.
7616   AAMemoryLocation::MemoryLocationsKind
7617   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7618 
7619   /// Return the access kind as determined by \p I.
7620   AccessKind getAccessKindFromInst(const Instruction *I) {
7621     AccessKind AK = READ_WRITE;
7622     if (I) {
7623       AK = I->mayReadFromMemory() ? READ : NONE;
7624       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7625     }
7626     return AK;
7627   }
7628 
7629   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7630   /// an access of kind \p AK to a \p MLK memory location with the access
7631   /// pointer \p Ptr.
7632   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7633                                  MemoryLocationsKind MLK, const Instruction *I,
7634                                  const Value *Ptr, bool &Changed,
7635                                  AccessKind AK = READ_WRITE) {
7636 
7637     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7638     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7639     if (!Accesses)
7640       Accesses = new (Allocator) AccessSet();
7641     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7642     State.removeAssumedBits(MLK);
7643   }
7644 
7645   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7646   /// arguments, and update the state and access map accordingly.
7647   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7648                           AAMemoryLocation::StateType &State, bool &Changed);
7649 
7650   /// Used to allocate access sets.
7651   BumpPtrAllocator &Allocator;
7652 
7653   /// The set of IR attributes AAMemoryLocation deals with.
7654   static const Attribute::AttrKind AttrKinds[4];
7655 };
7656 
7657 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7658     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7659     Attribute::InaccessibleMemOrArgMemOnly};
7660 
7661 void AAMemoryLocationImpl::categorizePtrValue(
7662     Attributor &A, const Instruction &I, const Value &Ptr,
7663     AAMemoryLocation::StateType &State, bool &Changed) {
7664   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7665                     << Ptr << " ["
7666                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7667 
7668   SmallVector<Value *, 8> Objects;
7669   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I)) {
7670     LLVM_DEBUG(
7671         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7672     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7673                               getAccessKindFromInst(&I));
7674     return;
7675   }
7676 
7677   for (Value *Obj : Objects) {
7678     // TODO: recognize the TBAA used for constant accesses.
7679     MemoryLocationsKind MLK = NO_LOCATIONS;
7680     assert(!isa<GEPOperator>(Obj) && "GEPs should have been stripped.");
7681     if (isa<UndefValue>(Obj))
7682       continue;
7683     if (isa<Argument>(Obj)) {
7684       // TODO: For now we do not treat byval arguments as local copies performed
7685       // on the call edge, though, we should. To make that happen we need to
7686       // teach various passes, e.g., DSE, about the copy effect of a byval. That
7687       // would also allow us to mark functions only accessing byval arguments as
7688       // readnone again, atguably their acceses have no effect outside of the
7689       // function, like accesses to allocas.
7690       MLK = NO_ARGUMENT_MEM;
7691     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7692       // Reading constant memory is not treated as a read "effect" by the
7693       // function attr pass so we won't neither. Constants defined by TBAA are
7694       // similar. (We know we do not write it because it is constant.)
7695       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7696         if (GVar->isConstant())
7697           continue;
7698 
7699       if (GV->hasLocalLinkage())
7700         MLK = NO_GLOBAL_INTERNAL_MEM;
7701       else
7702         MLK = NO_GLOBAL_EXTERNAL_MEM;
7703     } else if (isa<ConstantPointerNull>(Obj) &&
7704                !NullPointerIsDefined(getAssociatedFunction(),
7705                                      Ptr.getType()->getPointerAddressSpace())) {
7706       continue;
7707     } else if (isa<AllocaInst>(Obj)) {
7708       MLK = NO_LOCAL_MEM;
7709     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7710       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7711           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7712       if (NoAliasAA.isAssumedNoAlias())
7713         MLK = NO_MALLOCED_MEM;
7714       else
7715         MLK = NO_UNKOWN_MEM;
7716     } else {
7717       MLK = NO_UNKOWN_MEM;
7718     }
7719 
7720     assert(MLK != NO_LOCATIONS && "No location specified!");
7721     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7722                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7723                       << "\n");
7724     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7725                               getAccessKindFromInst(&I));
7726   }
7727 
7728   LLVM_DEBUG(
7729       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7730              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7731 }
7732 
7733 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7734     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7735     bool &Changed) {
7736   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
7737 
7738     // Skip non-pointer arguments.
7739     const Value *ArgOp = CB.getArgOperand(ArgNo);
7740     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7741       continue;
7742 
7743     // Skip readnone arguments.
7744     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7745     const auto &ArgOpMemLocationAA =
7746         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7747 
7748     if (ArgOpMemLocationAA.isAssumedReadNone())
7749       continue;
7750 
7751     // Categorize potentially accessed pointer arguments as if there was an
7752     // access instruction with them as pointer.
7753     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7754   }
7755 }
7756 
7757 AAMemoryLocation::MemoryLocationsKind
7758 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7759                                                   bool &Changed) {
7760   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7761                     << I << "\n");
7762 
7763   AAMemoryLocation::StateType AccessedLocs;
7764   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7765 
7766   if (auto *CB = dyn_cast<CallBase>(&I)) {
7767 
7768     // First check if we assume any memory is access is visible.
7769     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7770         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7771     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7772                       << " [" << CBMemLocationAA << "]\n");
7773 
7774     if (CBMemLocationAA.isAssumedReadNone())
7775       return NO_LOCATIONS;
7776 
7777     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7778       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7779                                 Changed, getAccessKindFromInst(&I));
7780       return AccessedLocs.getAssumed();
7781     }
7782 
7783     uint32_t CBAssumedNotAccessedLocs =
7784         CBMemLocationAA.getAssumedNotAccessedLocation();
7785 
7786     // Set the argmemonly and global bit as we handle them separately below.
7787     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7788         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7789 
7790     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7791       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7792         continue;
7793       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7794                                 getAccessKindFromInst(&I));
7795     }
7796 
7797     // Now handle global memory if it might be accessed. This is slightly tricky
7798     // as NO_GLOBAL_MEM has multiple bits set.
7799     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7800     if (HasGlobalAccesses) {
7801       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7802                             AccessKind Kind, MemoryLocationsKind MLK) {
7803         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7804                                   getAccessKindFromInst(&I));
7805         return true;
7806       };
7807       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7808               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7809         return AccessedLocs.getWorstState();
7810     }
7811 
7812     LLVM_DEBUG(
7813         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7814                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7815 
7816     // Now handle argument memory if it might be accessed.
7817     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7818     if (HasArgAccesses)
7819       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7820 
7821     LLVM_DEBUG(
7822         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7823                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7824 
7825     return AccessedLocs.getAssumed();
7826   }
7827 
7828   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7829     LLVM_DEBUG(
7830         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7831                << I << " [" << *Ptr << "]\n");
7832     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7833     return AccessedLocs.getAssumed();
7834   }
7835 
7836   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7837                     << I << "\n");
7838   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7839                             getAccessKindFromInst(&I));
7840   return AccessedLocs.getAssumed();
7841 }
7842 
7843 /// An AA to represent the memory behavior function attributes.
7844 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7845   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7846       : AAMemoryLocationImpl(IRP, A) {}
7847 
7848   /// See AbstractAttribute::updateImpl(Attributor &A).
7849   virtual ChangeStatus updateImpl(Attributor &A) override {
7850 
7851     const auto &MemBehaviorAA =
7852         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7853     if (MemBehaviorAA.isAssumedReadNone()) {
7854       if (MemBehaviorAA.isKnownReadNone())
7855         return indicateOptimisticFixpoint();
7856       assert(isAssumedReadNone() &&
7857              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7858       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7859       return ChangeStatus::UNCHANGED;
7860     }
7861 
7862     // The current assumed state used to determine a change.
7863     auto AssumedState = getAssumed();
7864     bool Changed = false;
7865 
7866     auto CheckRWInst = [&](Instruction &I) {
7867       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7868       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7869                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7870       removeAssumedBits(inverseLocation(MLK, false, false));
7871       // Stop once only the valid bit set in the *not assumed location*, thus
7872       // once we don't actually exclude any memory locations in the state.
7873       return getAssumedNotAccessedLocation() != VALID_STATE;
7874     };
7875 
7876     bool UsedAssumedInformation = false;
7877     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7878                                             UsedAssumedInformation))
7879       return indicatePessimisticFixpoint();
7880 
7881     Changed |= AssumedState != getAssumed();
7882     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7883   }
7884 
7885   /// See AbstractAttribute::trackStatistics()
7886   void trackStatistics() const override {
7887     if (isAssumedReadNone())
7888       STATS_DECLTRACK_FN_ATTR(readnone)
7889     else if (isAssumedArgMemOnly())
7890       STATS_DECLTRACK_FN_ATTR(argmemonly)
7891     else if (isAssumedInaccessibleMemOnly())
7892       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7893     else if (isAssumedInaccessibleOrArgMemOnly())
7894       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7895   }
7896 };
7897 
7898 /// AAMemoryLocation attribute for call sites.
7899 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7900   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7901       : AAMemoryLocationImpl(IRP, A) {}
7902 
7903   /// See AbstractAttribute::initialize(...).
7904   void initialize(Attributor &A) override {
7905     AAMemoryLocationImpl::initialize(A);
7906     Function *F = getAssociatedFunction();
7907     if (!F || F->isDeclaration())
7908       indicatePessimisticFixpoint();
7909   }
7910 
7911   /// See AbstractAttribute::updateImpl(...).
7912   ChangeStatus updateImpl(Attributor &A) override {
7913     // TODO: Once we have call site specific value information we can provide
7914     //       call site specific liveness liveness information and then it makes
7915     //       sense to specialize attributes for call sites arguments instead of
7916     //       redirecting requests to the callee argument.
7917     Function *F = getAssociatedFunction();
7918     const IRPosition &FnPos = IRPosition::function(*F);
7919     auto &FnAA =
7920         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7921     bool Changed = false;
7922     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7923                           AccessKind Kind, MemoryLocationsKind MLK) {
7924       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7925                                 getAccessKindFromInst(I));
7926       return true;
7927     };
7928     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7929       return indicatePessimisticFixpoint();
7930     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7931   }
7932 
7933   /// See AbstractAttribute::trackStatistics()
7934   void trackStatistics() const override {
7935     if (isAssumedReadNone())
7936       STATS_DECLTRACK_CS_ATTR(readnone)
7937   }
7938 };
7939 
7940 /// ------------------ Value Constant Range Attribute -------------------------
7941 
7942 struct AAValueConstantRangeImpl : AAValueConstantRange {
7943   using StateType = IntegerRangeState;
7944   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7945       : AAValueConstantRange(IRP, A) {}
7946 
7947   /// See AbstractAttribute::initialize(..).
7948   void initialize(Attributor &A) override {
7949     if (A.hasSimplificationCallback(getIRPosition())) {
7950       indicatePessimisticFixpoint();
7951       return;
7952     }
7953 
7954     // Intersect a range given by SCEV.
7955     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7956 
7957     // Intersect a range given by LVI.
7958     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7959   }
7960 
7961   /// See AbstractAttribute::getAsStr().
7962   const std::string getAsStr() const override {
7963     std::string Str;
7964     llvm::raw_string_ostream OS(Str);
7965     OS << "range(" << getBitWidth() << ")<";
7966     getKnown().print(OS);
7967     OS << " / ";
7968     getAssumed().print(OS);
7969     OS << ">";
7970     return OS.str();
7971   }
7972 
7973   /// Helper function to get a SCEV expr for the associated value at program
7974   /// point \p I.
7975   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7976     if (!getAnchorScope())
7977       return nullptr;
7978 
7979     ScalarEvolution *SE =
7980         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7981             *getAnchorScope());
7982 
7983     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7984         *getAnchorScope());
7985 
7986     if (!SE || !LI)
7987       return nullptr;
7988 
7989     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7990     if (!I)
7991       return S;
7992 
7993     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7994   }
7995 
7996   /// Helper function to get a range from SCEV for the associated value at
7997   /// program point \p I.
7998   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7999                                          const Instruction *I = nullptr) const {
8000     if (!getAnchorScope())
8001       return getWorstState(getBitWidth());
8002 
8003     ScalarEvolution *SE =
8004         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
8005             *getAnchorScope());
8006 
8007     const SCEV *S = getSCEV(A, I);
8008     if (!SE || !S)
8009       return getWorstState(getBitWidth());
8010 
8011     return SE->getUnsignedRange(S);
8012   }
8013 
8014   /// Helper function to get a range from LVI for the associated value at
8015   /// program point \p I.
8016   ConstantRange
8017   getConstantRangeFromLVI(Attributor &A,
8018                           const Instruction *CtxI = nullptr) const {
8019     if (!getAnchorScope())
8020       return getWorstState(getBitWidth());
8021 
8022     LazyValueInfo *LVI =
8023         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8024             *getAnchorScope());
8025 
8026     if (!LVI || !CtxI)
8027       return getWorstState(getBitWidth());
8028     return LVI->getConstantRange(&getAssociatedValue(),
8029                                  const_cast<Instruction *>(CtxI));
8030   }
8031 
8032   /// Return true if \p CtxI is valid for querying outside analyses.
8033   /// This basically makes sure we do not ask intra-procedural analysis
8034   /// about a context in the wrong function or a context that violates
8035   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8036   /// if the original context of this AA is OK or should be considered invalid.
8037   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8038                                                const Instruction *CtxI,
8039                                                bool AllowAACtxI) const {
8040     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8041       return false;
8042 
8043     // Our context might be in a different function, neither intra-procedural
8044     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8045     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8046       return false;
8047 
8048     // If the context is not dominated by the value there are paths to the
8049     // context that do not define the value. This cannot be handled by
8050     // LazyValueInfo so we need to bail.
8051     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8052       InformationCache &InfoCache = A.getInfoCache();
8053       const DominatorTree *DT =
8054           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8055               *I->getFunction());
8056       return DT && DT->dominates(I, CtxI);
8057     }
8058 
8059     return true;
8060   }
8061 
8062   /// See AAValueConstantRange::getKnownConstantRange(..).
8063   ConstantRange
8064   getKnownConstantRange(Attributor &A,
8065                         const Instruction *CtxI = nullptr) const override {
8066     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8067                                                  /* AllowAACtxI */ false))
8068       return getKnown();
8069 
8070     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8071     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8072     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8073   }
8074 
8075   /// See AAValueConstantRange::getAssumedConstantRange(..).
8076   ConstantRange
8077   getAssumedConstantRange(Attributor &A,
8078                           const Instruction *CtxI = nullptr) const override {
8079     // TODO: Make SCEV use Attributor assumption.
8080     //       We may be able to bound a variable range via assumptions in
8081     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8082     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8083     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8084                                                  /* AllowAACtxI */ false))
8085       return getAssumed();
8086 
8087     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8088     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8089     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8090   }
8091 
8092   /// Helper function to create MDNode for range metadata.
8093   static MDNode *
8094   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8095                             const ConstantRange &AssumedConstantRange) {
8096     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8097                                   Ty, AssumedConstantRange.getLower())),
8098                               ConstantAsMetadata::get(ConstantInt::get(
8099                                   Ty, AssumedConstantRange.getUpper()))};
8100     return MDNode::get(Ctx, LowAndHigh);
8101   }
8102 
8103   /// Return true if \p Assumed is included in \p KnownRanges.
8104   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8105 
8106     if (Assumed.isFullSet())
8107       return false;
8108 
8109     if (!KnownRanges)
8110       return true;
8111 
8112     // If multiple ranges are annotated in IR, we give up to annotate assumed
8113     // range for now.
8114 
8115     // TODO:  If there exists a known range which containts assumed range, we
8116     // can say assumed range is better.
8117     if (KnownRanges->getNumOperands() > 2)
8118       return false;
8119 
8120     ConstantInt *Lower =
8121         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8122     ConstantInt *Upper =
8123         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8124 
8125     ConstantRange Known(Lower->getValue(), Upper->getValue());
8126     return Known.contains(Assumed) && Known != Assumed;
8127   }
8128 
8129   /// Helper function to set range metadata.
8130   static bool
8131   setRangeMetadataIfisBetterRange(Instruction *I,
8132                                   const ConstantRange &AssumedConstantRange) {
8133     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8134     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8135       if (!AssumedConstantRange.isEmptySet()) {
8136         I->setMetadata(LLVMContext::MD_range,
8137                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8138                                                  AssumedConstantRange));
8139         return true;
8140       }
8141     }
8142     return false;
8143   }
8144 
8145   /// See AbstractAttribute::manifest()
8146   ChangeStatus manifest(Attributor &A) override {
8147     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8148     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8149     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8150 
8151     auto &V = getAssociatedValue();
8152     if (!AssumedConstantRange.isEmptySet() &&
8153         !AssumedConstantRange.isSingleElement()) {
8154       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8155         assert(I == getCtxI() && "Should not annotate an instruction which is "
8156                                  "not the context instruction");
8157         if (isa<CallInst>(I) || isa<LoadInst>(I))
8158           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8159             Changed = ChangeStatus::CHANGED;
8160       }
8161     }
8162 
8163     return Changed;
8164   }
8165 };
8166 
8167 struct AAValueConstantRangeArgument final
8168     : AAArgumentFromCallSiteArguments<
8169           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8170           true /* BridgeCallBaseContext */> {
8171   using Base = AAArgumentFromCallSiteArguments<
8172       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8173       true /* BridgeCallBaseContext */>;
8174   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8175       : Base(IRP, A) {}
8176 
8177   /// See AbstractAttribute::initialize(..).
8178   void initialize(Attributor &A) override {
8179     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8180       indicatePessimisticFixpoint();
8181     } else {
8182       Base::initialize(A);
8183     }
8184   }
8185 
8186   /// See AbstractAttribute::trackStatistics()
8187   void trackStatistics() const override {
8188     STATS_DECLTRACK_ARG_ATTR(value_range)
8189   }
8190 };
8191 
8192 struct AAValueConstantRangeReturned
8193     : AAReturnedFromReturnedValues<AAValueConstantRange,
8194                                    AAValueConstantRangeImpl,
8195                                    AAValueConstantRangeImpl::StateType,
8196                                    /* PropogateCallBaseContext */ true> {
8197   using Base =
8198       AAReturnedFromReturnedValues<AAValueConstantRange,
8199                                    AAValueConstantRangeImpl,
8200                                    AAValueConstantRangeImpl::StateType,
8201                                    /* PropogateCallBaseContext */ true>;
8202   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8203       : Base(IRP, A) {}
8204 
8205   /// See AbstractAttribute::initialize(...).
8206   void initialize(Attributor &A) override {}
8207 
8208   /// See AbstractAttribute::trackStatistics()
8209   void trackStatistics() const override {
8210     STATS_DECLTRACK_FNRET_ATTR(value_range)
8211   }
8212 };
8213 
8214 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8215   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8216       : AAValueConstantRangeImpl(IRP, A) {}
8217 
8218   /// See AbstractAttribute::initialize(...).
8219   void initialize(Attributor &A) override {
8220     AAValueConstantRangeImpl::initialize(A);
8221     if (isAtFixpoint())
8222       return;
8223 
8224     Value &V = getAssociatedValue();
8225 
8226     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8227       unionAssumed(ConstantRange(C->getValue()));
8228       indicateOptimisticFixpoint();
8229       return;
8230     }
8231 
8232     if (isa<UndefValue>(&V)) {
8233       // Collapse the undef state to 0.
8234       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8235       indicateOptimisticFixpoint();
8236       return;
8237     }
8238 
8239     if (isa<CallBase>(&V))
8240       return;
8241 
8242     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8243       return;
8244 
8245     // If it is a load instruction with range metadata, use it.
8246     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8247       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8248         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8249         return;
8250       }
8251 
8252     // We can work with PHI and select instruction as we traverse their operands
8253     // during update.
8254     if (isa<SelectInst>(V) || isa<PHINode>(V))
8255       return;
8256 
8257     // Otherwise we give up.
8258     indicatePessimisticFixpoint();
8259 
8260     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8261                       << getAssociatedValue() << "\n");
8262   }
8263 
8264   bool calculateBinaryOperator(
8265       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8266       const Instruction *CtxI,
8267       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8268     Value *LHS = BinOp->getOperand(0);
8269     Value *RHS = BinOp->getOperand(1);
8270 
8271     // Simplify the operands first.
8272     bool UsedAssumedInformation = false;
8273     const auto &SimplifiedLHS =
8274         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8275                                *this, UsedAssumedInformation);
8276     if (!SimplifiedLHS.hasValue())
8277       return true;
8278     if (!SimplifiedLHS.getValue())
8279       return false;
8280     LHS = *SimplifiedLHS;
8281 
8282     const auto &SimplifiedRHS =
8283         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8284                                *this, UsedAssumedInformation);
8285     if (!SimplifiedRHS.hasValue())
8286       return true;
8287     if (!SimplifiedRHS.getValue())
8288       return false;
8289     RHS = *SimplifiedRHS;
8290 
8291     // TODO: Allow non integers as well.
8292     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8293       return false;
8294 
8295     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8296         *this, IRPosition::value(*LHS, getCallBaseContext()),
8297         DepClassTy::REQUIRED);
8298     QuerriedAAs.push_back(&LHSAA);
8299     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8300 
8301     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8302         *this, IRPosition::value(*RHS, getCallBaseContext()),
8303         DepClassTy::REQUIRED);
8304     QuerriedAAs.push_back(&RHSAA);
8305     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8306 
8307     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8308 
8309     T.unionAssumed(AssumedRange);
8310 
8311     // TODO: Track a known state too.
8312 
8313     return T.isValidState();
8314   }
8315 
8316   bool calculateCastInst(
8317       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8318       const Instruction *CtxI,
8319       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8320     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8321     // TODO: Allow non integers as well.
8322     Value *OpV = CastI->getOperand(0);
8323 
8324     // Simplify the operand first.
8325     bool UsedAssumedInformation = false;
8326     const auto &SimplifiedOpV =
8327         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8328                                *this, UsedAssumedInformation);
8329     if (!SimplifiedOpV.hasValue())
8330       return true;
8331     if (!SimplifiedOpV.getValue())
8332       return false;
8333     OpV = *SimplifiedOpV;
8334 
8335     if (!OpV->getType()->isIntegerTy())
8336       return false;
8337 
8338     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8339         *this, IRPosition::value(*OpV, getCallBaseContext()),
8340         DepClassTy::REQUIRED);
8341     QuerriedAAs.push_back(&OpAA);
8342     T.unionAssumed(
8343         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8344     return T.isValidState();
8345   }
8346 
8347   bool
8348   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8349                    const Instruction *CtxI,
8350                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8351     Value *LHS = CmpI->getOperand(0);
8352     Value *RHS = CmpI->getOperand(1);
8353 
8354     // Simplify the operands first.
8355     bool UsedAssumedInformation = false;
8356     const auto &SimplifiedLHS =
8357         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8358                                *this, UsedAssumedInformation);
8359     if (!SimplifiedLHS.hasValue())
8360       return true;
8361     if (!SimplifiedLHS.getValue())
8362       return false;
8363     LHS = *SimplifiedLHS;
8364 
8365     const auto &SimplifiedRHS =
8366         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8367                                *this, UsedAssumedInformation);
8368     if (!SimplifiedRHS.hasValue())
8369       return true;
8370     if (!SimplifiedRHS.getValue())
8371       return false;
8372     RHS = *SimplifiedRHS;
8373 
8374     // TODO: Allow non integers as well.
8375     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8376       return false;
8377 
8378     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8379         *this, IRPosition::value(*LHS, getCallBaseContext()),
8380         DepClassTy::REQUIRED);
8381     QuerriedAAs.push_back(&LHSAA);
8382     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8383         *this, IRPosition::value(*RHS, getCallBaseContext()),
8384         DepClassTy::REQUIRED);
8385     QuerriedAAs.push_back(&RHSAA);
8386     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8387     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8388 
8389     // If one of them is empty set, we can't decide.
8390     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8391       return true;
8392 
8393     bool MustTrue = false, MustFalse = false;
8394 
8395     auto AllowedRegion =
8396         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8397 
8398     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8399       MustFalse = true;
8400 
8401     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8402       MustTrue = true;
8403 
8404     assert((!MustTrue || !MustFalse) &&
8405            "Either MustTrue or MustFalse should be false!");
8406 
8407     if (MustTrue)
8408       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8409     else if (MustFalse)
8410       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8411     else
8412       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8413 
8414     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8415                       << " " << RHSAA << "\n");
8416 
8417     // TODO: Track a known state too.
8418     return T.isValidState();
8419   }
8420 
8421   /// See AbstractAttribute::updateImpl(...).
8422   ChangeStatus updateImpl(Attributor &A) override {
8423     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8424                             IntegerRangeState &T, bool Stripped) -> bool {
8425       Instruction *I = dyn_cast<Instruction>(&V);
8426       if (!I || isa<CallBase>(I)) {
8427 
8428         // Simplify the operand first.
8429         bool UsedAssumedInformation = false;
8430         const auto &SimplifiedOpV =
8431             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8432                                    *this, UsedAssumedInformation);
8433         if (!SimplifiedOpV.hasValue())
8434           return true;
8435         if (!SimplifiedOpV.getValue())
8436           return false;
8437         Value *VPtr = *SimplifiedOpV;
8438 
8439         // If the value is not instruction, we query AA to Attributor.
8440         const auto &AA = A.getAAFor<AAValueConstantRange>(
8441             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8442             DepClassTy::REQUIRED);
8443 
8444         // Clamp operator is not used to utilize a program point CtxI.
8445         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8446 
8447         return T.isValidState();
8448       }
8449 
8450       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8451       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8452         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8453           return false;
8454       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8455         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8456           return false;
8457       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8458         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8459           return false;
8460       } else {
8461         // Give up with other instructions.
8462         // TODO: Add other instructions
8463 
8464         T.indicatePessimisticFixpoint();
8465         return false;
8466       }
8467 
8468       // Catch circular reasoning in a pessimistic way for now.
8469       // TODO: Check how the range evolves and if we stripped anything, see also
8470       //       AADereferenceable or AAAlign for similar situations.
8471       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8472         if (QueriedAA != this)
8473           continue;
8474         // If we are in a stady state we do not need to worry.
8475         if (T.getAssumed() == getState().getAssumed())
8476           continue;
8477         T.indicatePessimisticFixpoint();
8478       }
8479 
8480       return T.isValidState();
8481     };
8482 
8483     IntegerRangeState T(getBitWidth());
8484 
8485     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8486                                                   VisitValueCB, getCtxI(),
8487                                                   /* UseValueSimplify */ false))
8488       return indicatePessimisticFixpoint();
8489 
8490     return clampStateAndIndicateChange(getState(), T);
8491   }
8492 
8493   /// See AbstractAttribute::trackStatistics()
8494   void trackStatistics() const override {
8495     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8496   }
8497 };
8498 
8499 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8500   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8501       : AAValueConstantRangeImpl(IRP, A) {}
8502 
8503   /// See AbstractAttribute::initialize(...).
8504   ChangeStatus updateImpl(Attributor &A) override {
8505     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8506                      "not be called");
8507   }
8508 
8509   /// See AbstractAttribute::trackStatistics()
8510   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8511 };
8512 
8513 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8514   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8515       : AAValueConstantRangeFunction(IRP, A) {}
8516 
8517   /// See AbstractAttribute::trackStatistics()
8518   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8519 };
8520 
8521 struct AAValueConstantRangeCallSiteReturned
8522     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8523                                      AAValueConstantRangeImpl,
8524                                      AAValueConstantRangeImpl::StateType,
8525                                      /* IntroduceCallBaseContext */ true> {
8526   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8527       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8528                                        AAValueConstantRangeImpl,
8529                                        AAValueConstantRangeImpl::StateType,
8530                                        /* IntroduceCallBaseContext */ true>(IRP,
8531                                                                             A) {
8532   }
8533 
8534   /// See AbstractAttribute::initialize(...).
8535   void initialize(Attributor &A) override {
8536     // If it is a load instruction with range metadata, use the metadata.
8537     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8538       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8539         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8540 
8541     AAValueConstantRangeImpl::initialize(A);
8542   }
8543 
8544   /// See AbstractAttribute::trackStatistics()
8545   void trackStatistics() const override {
8546     STATS_DECLTRACK_CSRET_ATTR(value_range)
8547   }
8548 };
8549 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8550   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8551       : AAValueConstantRangeFloating(IRP, A) {}
8552 
8553   /// See AbstractAttribute::manifest()
8554   ChangeStatus manifest(Attributor &A) override {
8555     return ChangeStatus::UNCHANGED;
8556   }
8557 
8558   /// See AbstractAttribute::trackStatistics()
8559   void trackStatistics() const override {
8560     STATS_DECLTRACK_CSARG_ATTR(value_range)
8561   }
8562 };
8563 
8564 /// ------------------ Potential Values Attribute -------------------------
8565 
8566 struct AAPotentialValuesImpl : AAPotentialValues {
8567   using StateType = PotentialConstantIntValuesState;
8568 
8569   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8570       : AAPotentialValues(IRP, A) {}
8571 
8572   /// See AbstractAttribute::initialize(..).
8573   void initialize(Attributor &A) override {
8574     if (A.hasSimplificationCallback(getIRPosition()))
8575       indicatePessimisticFixpoint();
8576     else
8577       AAPotentialValues::initialize(A);
8578   }
8579 
8580   /// See AbstractAttribute::getAsStr().
8581   const std::string getAsStr() const override {
8582     std::string Str;
8583     llvm::raw_string_ostream OS(Str);
8584     OS << getState();
8585     return OS.str();
8586   }
8587 
8588   /// See AbstractAttribute::updateImpl(...).
8589   ChangeStatus updateImpl(Attributor &A) override {
8590     return indicatePessimisticFixpoint();
8591   }
8592 };
8593 
8594 struct AAPotentialValuesArgument final
8595     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8596                                       PotentialConstantIntValuesState> {
8597   using Base =
8598       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8599                                       PotentialConstantIntValuesState>;
8600   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8601       : Base(IRP, A) {}
8602 
8603   /// See AbstractAttribute::initialize(..).
8604   void initialize(Attributor &A) override {
8605     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8606       indicatePessimisticFixpoint();
8607     } else {
8608       Base::initialize(A);
8609     }
8610   }
8611 
8612   /// See AbstractAttribute::trackStatistics()
8613   void trackStatistics() const override {
8614     STATS_DECLTRACK_ARG_ATTR(potential_values)
8615   }
8616 };
8617 
8618 struct AAPotentialValuesReturned
8619     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8620   using Base =
8621       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8622   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8623       : Base(IRP, A) {}
8624 
8625   /// See AbstractAttribute::trackStatistics()
8626   void trackStatistics() const override {
8627     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8628   }
8629 };
8630 
8631 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8632   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8633       : AAPotentialValuesImpl(IRP, A) {}
8634 
8635   /// See AbstractAttribute::initialize(..).
8636   void initialize(Attributor &A) override {
8637     AAPotentialValuesImpl::initialize(A);
8638     if (isAtFixpoint())
8639       return;
8640 
8641     Value &V = getAssociatedValue();
8642 
8643     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8644       unionAssumed(C->getValue());
8645       indicateOptimisticFixpoint();
8646       return;
8647     }
8648 
8649     if (isa<UndefValue>(&V)) {
8650       unionAssumedWithUndef();
8651       indicateOptimisticFixpoint();
8652       return;
8653     }
8654 
8655     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8656       return;
8657 
8658     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8659       return;
8660 
8661     indicatePessimisticFixpoint();
8662 
8663     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8664                       << getAssociatedValue() << "\n");
8665   }
8666 
8667   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8668                                 const APInt &RHS) {
8669     ICmpInst::Predicate Pred = ICI->getPredicate();
8670     switch (Pred) {
8671     case ICmpInst::ICMP_UGT:
8672       return LHS.ugt(RHS);
8673     case ICmpInst::ICMP_SGT:
8674       return LHS.sgt(RHS);
8675     case ICmpInst::ICMP_EQ:
8676       return LHS.eq(RHS);
8677     case ICmpInst::ICMP_UGE:
8678       return LHS.uge(RHS);
8679     case ICmpInst::ICMP_SGE:
8680       return LHS.sge(RHS);
8681     case ICmpInst::ICMP_ULT:
8682       return LHS.ult(RHS);
8683     case ICmpInst::ICMP_SLT:
8684       return LHS.slt(RHS);
8685     case ICmpInst::ICMP_NE:
8686       return LHS.ne(RHS);
8687     case ICmpInst::ICMP_ULE:
8688       return LHS.ule(RHS);
8689     case ICmpInst::ICMP_SLE:
8690       return LHS.sle(RHS);
8691     default:
8692       llvm_unreachable("Invalid ICmp predicate!");
8693     }
8694   }
8695 
8696   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8697                                  uint32_t ResultBitWidth) {
8698     Instruction::CastOps CastOp = CI->getOpcode();
8699     switch (CastOp) {
8700     default:
8701       llvm_unreachable("unsupported or not integer cast");
8702     case Instruction::Trunc:
8703       return Src.trunc(ResultBitWidth);
8704     case Instruction::SExt:
8705       return Src.sext(ResultBitWidth);
8706     case Instruction::ZExt:
8707       return Src.zext(ResultBitWidth);
8708     case Instruction::BitCast:
8709       return Src;
8710     }
8711   }
8712 
8713   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8714                                        const APInt &LHS, const APInt &RHS,
8715                                        bool &SkipOperation, bool &Unsupported) {
8716     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8717     // Unsupported is set to true when the binary operator is not supported.
8718     // SkipOperation is set to true when UB occur with the given operand pair
8719     // (LHS, RHS).
8720     // TODO: we should look at nsw and nuw keywords to handle operations
8721     //       that create poison or undef value.
8722     switch (BinOpcode) {
8723     default:
8724       Unsupported = true;
8725       return LHS;
8726     case Instruction::Add:
8727       return LHS + RHS;
8728     case Instruction::Sub:
8729       return LHS - RHS;
8730     case Instruction::Mul:
8731       return LHS * RHS;
8732     case Instruction::UDiv:
8733       if (RHS.isNullValue()) {
8734         SkipOperation = true;
8735         return LHS;
8736       }
8737       return LHS.udiv(RHS);
8738     case Instruction::SDiv:
8739       if (RHS.isNullValue()) {
8740         SkipOperation = true;
8741         return LHS;
8742       }
8743       return LHS.sdiv(RHS);
8744     case Instruction::URem:
8745       if (RHS.isNullValue()) {
8746         SkipOperation = true;
8747         return LHS;
8748       }
8749       return LHS.urem(RHS);
8750     case Instruction::SRem:
8751       if (RHS.isNullValue()) {
8752         SkipOperation = true;
8753         return LHS;
8754       }
8755       return LHS.srem(RHS);
8756     case Instruction::Shl:
8757       return LHS.shl(RHS);
8758     case Instruction::LShr:
8759       return LHS.lshr(RHS);
8760     case Instruction::AShr:
8761       return LHS.ashr(RHS);
8762     case Instruction::And:
8763       return LHS & RHS;
8764     case Instruction::Or:
8765       return LHS | RHS;
8766     case Instruction::Xor:
8767       return LHS ^ RHS;
8768     }
8769   }
8770 
8771   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8772                                            const APInt &LHS, const APInt &RHS) {
8773     bool SkipOperation = false;
8774     bool Unsupported = false;
8775     APInt Result =
8776         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8777     if (Unsupported)
8778       return false;
8779     // If SkipOperation is true, we can ignore this operand pair (L, R).
8780     if (!SkipOperation)
8781       unionAssumed(Result);
8782     return isValidState();
8783   }
8784 
8785   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8786     auto AssumedBefore = getAssumed();
8787     Value *LHS = ICI->getOperand(0);
8788     Value *RHS = ICI->getOperand(1);
8789 
8790     // Simplify the operands first.
8791     bool UsedAssumedInformation = false;
8792     const auto &SimplifiedLHS =
8793         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8794                                *this, UsedAssumedInformation);
8795     if (!SimplifiedLHS.hasValue())
8796       return ChangeStatus::UNCHANGED;
8797     if (!SimplifiedLHS.getValue())
8798       return indicatePessimisticFixpoint();
8799     LHS = *SimplifiedLHS;
8800 
8801     const auto &SimplifiedRHS =
8802         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8803                                *this, UsedAssumedInformation);
8804     if (!SimplifiedRHS.hasValue())
8805       return ChangeStatus::UNCHANGED;
8806     if (!SimplifiedRHS.getValue())
8807       return indicatePessimisticFixpoint();
8808     RHS = *SimplifiedRHS;
8809 
8810     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8811       return indicatePessimisticFixpoint();
8812 
8813     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8814                                                 DepClassTy::REQUIRED);
8815     if (!LHSAA.isValidState())
8816       return indicatePessimisticFixpoint();
8817 
8818     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8819                                                 DepClassTy::REQUIRED);
8820     if (!RHSAA.isValidState())
8821       return indicatePessimisticFixpoint();
8822 
8823     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8824     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8825 
8826     // TODO: make use of undef flag to limit potential values aggressively.
8827     bool MaybeTrue = false, MaybeFalse = false;
8828     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
8829     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8830       // The result of any comparison between undefs can be soundly replaced
8831       // with undef.
8832       unionAssumedWithUndef();
8833     } else if (LHSAA.undefIsContained()) {
8834       for (const APInt &R : RHSAAPVS) {
8835         bool CmpResult = calculateICmpInst(ICI, Zero, R);
8836         MaybeTrue |= CmpResult;
8837         MaybeFalse |= !CmpResult;
8838         if (MaybeTrue & MaybeFalse)
8839           return indicatePessimisticFixpoint();
8840       }
8841     } else if (RHSAA.undefIsContained()) {
8842       for (const APInt &L : LHSAAPVS) {
8843         bool CmpResult = calculateICmpInst(ICI, L, Zero);
8844         MaybeTrue |= CmpResult;
8845         MaybeFalse |= !CmpResult;
8846         if (MaybeTrue & MaybeFalse)
8847           return indicatePessimisticFixpoint();
8848       }
8849     } else {
8850       for (const APInt &L : LHSAAPVS) {
8851         for (const APInt &R : RHSAAPVS) {
8852           bool CmpResult = calculateICmpInst(ICI, L, R);
8853           MaybeTrue |= CmpResult;
8854           MaybeFalse |= !CmpResult;
8855           if (MaybeTrue & MaybeFalse)
8856             return indicatePessimisticFixpoint();
8857         }
8858       }
8859     }
8860     if (MaybeTrue)
8861       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
8862     if (MaybeFalse)
8863       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
8864     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8865                                          : ChangeStatus::CHANGED;
8866   }
8867 
8868   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
8869     auto AssumedBefore = getAssumed();
8870     Value *LHS = SI->getTrueValue();
8871     Value *RHS = SI->getFalseValue();
8872 
8873     // Simplify the operands first.
8874     bool UsedAssumedInformation = false;
8875     const auto &SimplifiedLHS =
8876         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8877                                *this, UsedAssumedInformation);
8878     if (!SimplifiedLHS.hasValue())
8879       return ChangeStatus::UNCHANGED;
8880     if (!SimplifiedLHS.getValue())
8881       return indicatePessimisticFixpoint();
8882     LHS = *SimplifiedLHS;
8883 
8884     const auto &SimplifiedRHS =
8885         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8886                                *this, UsedAssumedInformation);
8887     if (!SimplifiedRHS.hasValue())
8888       return ChangeStatus::UNCHANGED;
8889     if (!SimplifiedRHS.getValue())
8890       return indicatePessimisticFixpoint();
8891     RHS = *SimplifiedRHS;
8892 
8893     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8894       return indicatePessimisticFixpoint();
8895 
8896     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
8897                                                   UsedAssumedInformation);
8898 
8899     // Check if we only need one operand.
8900     bool OnlyLeft = false, OnlyRight = false;
8901     if (C.hasValue() && *C && (*C)->isOneValue())
8902       OnlyLeft = true;
8903     else if (C.hasValue() && *C && (*C)->isZeroValue())
8904       OnlyRight = true;
8905 
8906     const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr;
8907     if (!OnlyRight) {
8908       LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8909                                              DepClassTy::REQUIRED);
8910       if (!LHSAA->isValidState())
8911         return indicatePessimisticFixpoint();
8912     }
8913     if (!OnlyLeft) {
8914       RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8915                                              DepClassTy::REQUIRED);
8916       if (!RHSAA->isValidState())
8917         return indicatePessimisticFixpoint();
8918     }
8919 
8920     if (!LHSAA || !RHSAA) {
8921       // select (true/false), lhs, rhs
8922       auto *OpAA = LHSAA ? LHSAA : RHSAA;
8923 
8924       if (OpAA->undefIsContained())
8925         unionAssumedWithUndef();
8926       else
8927         unionAssumed(*OpAA);
8928 
8929     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
8930       // select i1 *, undef , undef => undef
8931       unionAssumedWithUndef();
8932     } else {
8933       unionAssumed(*LHSAA);
8934       unionAssumed(*RHSAA);
8935     }
8936     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8937                                          : ChangeStatus::CHANGED;
8938   }
8939 
8940   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
8941     auto AssumedBefore = getAssumed();
8942     if (!CI->isIntegerCast())
8943       return indicatePessimisticFixpoint();
8944     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
8945     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
8946     Value *Src = CI->getOperand(0);
8947 
8948     // Simplify the operand first.
8949     bool UsedAssumedInformation = false;
8950     const auto &SimplifiedSrc =
8951         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
8952                                *this, UsedAssumedInformation);
8953     if (!SimplifiedSrc.hasValue())
8954       return ChangeStatus::UNCHANGED;
8955     if (!SimplifiedSrc.getValue())
8956       return indicatePessimisticFixpoint();
8957     Src = *SimplifiedSrc;
8958 
8959     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
8960                                                 DepClassTy::REQUIRED);
8961     if (!SrcAA.isValidState())
8962       return indicatePessimisticFixpoint();
8963     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
8964     if (SrcAA.undefIsContained())
8965       unionAssumedWithUndef();
8966     else {
8967       for (const APInt &S : SrcAAPVS) {
8968         APInt T = calculateCastInst(CI, S, ResultBitWidth);
8969         unionAssumed(T);
8970       }
8971     }
8972     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8973                                          : ChangeStatus::CHANGED;
8974   }
8975 
8976   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
8977     auto AssumedBefore = getAssumed();
8978     Value *LHS = BinOp->getOperand(0);
8979     Value *RHS = BinOp->getOperand(1);
8980 
8981     // Simplify the operands first.
8982     bool UsedAssumedInformation = false;
8983     const auto &SimplifiedLHS =
8984         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8985                                *this, UsedAssumedInformation);
8986     if (!SimplifiedLHS.hasValue())
8987       return ChangeStatus::UNCHANGED;
8988     if (!SimplifiedLHS.getValue())
8989       return indicatePessimisticFixpoint();
8990     LHS = *SimplifiedLHS;
8991 
8992     const auto &SimplifiedRHS =
8993         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8994                                *this, UsedAssumedInformation);
8995     if (!SimplifiedRHS.hasValue())
8996       return ChangeStatus::UNCHANGED;
8997     if (!SimplifiedRHS.getValue())
8998       return indicatePessimisticFixpoint();
8999     RHS = *SimplifiedRHS;
9000 
9001     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
9002       return indicatePessimisticFixpoint();
9003 
9004     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
9005                                                 DepClassTy::REQUIRED);
9006     if (!LHSAA.isValidState())
9007       return indicatePessimisticFixpoint();
9008 
9009     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
9010                                                 DepClassTy::REQUIRED);
9011     if (!RHSAA.isValidState())
9012       return indicatePessimisticFixpoint();
9013 
9014     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
9015     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
9016     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9017 
9018     // TODO: make use of undef flag to limit potential values aggressively.
9019     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9020       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9021         return indicatePessimisticFixpoint();
9022     } else if (LHSAA.undefIsContained()) {
9023       for (const APInt &R : RHSAAPVS) {
9024         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9025           return indicatePessimisticFixpoint();
9026       }
9027     } else if (RHSAA.undefIsContained()) {
9028       for (const APInt &L : LHSAAPVS) {
9029         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9030           return indicatePessimisticFixpoint();
9031       }
9032     } else {
9033       for (const APInt &L : LHSAAPVS) {
9034         for (const APInt &R : RHSAAPVS) {
9035           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9036             return indicatePessimisticFixpoint();
9037         }
9038       }
9039     }
9040     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9041                                          : ChangeStatus::CHANGED;
9042   }
9043 
9044   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9045     auto AssumedBefore = getAssumed();
9046     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9047       Value *IncomingValue = PHI->getIncomingValue(u);
9048 
9049       // Simplify the operand first.
9050       bool UsedAssumedInformation = false;
9051       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9052           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9053           UsedAssumedInformation);
9054       if (!SimplifiedIncomingValue.hasValue())
9055         continue;
9056       if (!SimplifiedIncomingValue.getValue())
9057         return indicatePessimisticFixpoint();
9058       IncomingValue = *SimplifiedIncomingValue;
9059 
9060       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
9061           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9062       if (!PotentialValuesAA.isValidState())
9063         return indicatePessimisticFixpoint();
9064       if (PotentialValuesAA.undefIsContained())
9065         unionAssumedWithUndef();
9066       else
9067         unionAssumed(PotentialValuesAA.getAssumed());
9068     }
9069     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9070                                          : ChangeStatus::CHANGED;
9071   }
9072 
9073   ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) {
9074     if (!L.getType()->isIntegerTy())
9075       return indicatePessimisticFixpoint();
9076 
9077     auto Union = [&](Value &V) {
9078       if (isa<UndefValue>(V)) {
9079         unionAssumedWithUndef();
9080         return true;
9081       }
9082       if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) {
9083         unionAssumed(CI->getValue());
9084         return true;
9085       }
9086       return false;
9087     };
9088     auto AssumedBefore = getAssumed();
9089 
9090     if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union))
9091       return indicatePessimisticFixpoint();
9092 
9093     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9094                                          : ChangeStatus::CHANGED;
9095   }
9096 
9097   /// See AbstractAttribute::updateImpl(...).
9098   ChangeStatus updateImpl(Attributor &A) override {
9099     Value &V = getAssociatedValue();
9100     Instruction *I = dyn_cast<Instruction>(&V);
9101 
9102     if (auto *ICI = dyn_cast<ICmpInst>(I))
9103       return updateWithICmpInst(A, ICI);
9104 
9105     if (auto *SI = dyn_cast<SelectInst>(I))
9106       return updateWithSelectInst(A, SI);
9107 
9108     if (auto *CI = dyn_cast<CastInst>(I))
9109       return updateWithCastInst(A, CI);
9110 
9111     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9112       return updateWithBinaryOperator(A, BinOp);
9113 
9114     if (auto *PHI = dyn_cast<PHINode>(I))
9115       return updateWithPHINode(A, PHI);
9116 
9117     if (auto *L = dyn_cast<LoadInst>(I))
9118       return updateWithLoad(A, *L);
9119 
9120     return indicatePessimisticFixpoint();
9121   }
9122 
9123   /// See AbstractAttribute::trackStatistics()
9124   void trackStatistics() const override {
9125     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9126   }
9127 };
9128 
9129 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
9130   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
9131       : AAPotentialValuesImpl(IRP, A) {}
9132 
9133   /// See AbstractAttribute::initialize(...).
9134   ChangeStatus updateImpl(Attributor &A) override {
9135     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
9136                      "not be called");
9137   }
9138 
9139   /// See AbstractAttribute::trackStatistics()
9140   void trackStatistics() const override {
9141     STATS_DECLTRACK_FN_ATTR(potential_values)
9142   }
9143 };
9144 
9145 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
9146   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
9147       : AAPotentialValuesFunction(IRP, A) {}
9148 
9149   /// See AbstractAttribute::trackStatistics()
9150   void trackStatistics() const override {
9151     STATS_DECLTRACK_CS_ATTR(potential_values)
9152   }
9153 };
9154 
9155 struct AAPotentialValuesCallSiteReturned
9156     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
9157   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
9158       : AACallSiteReturnedFromReturned<AAPotentialValues,
9159                                        AAPotentialValuesImpl>(IRP, A) {}
9160 
9161   /// See AbstractAttribute::trackStatistics()
9162   void trackStatistics() const override {
9163     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9164   }
9165 };
9166 
9167 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
9168   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
9169       : AAPotentialValuesFloating(IRP, A) {}
9170 
9171   /// See AbstractAttribute::initialize(..).
9172   void initialize(Attributor &A) override {
9173     AAPotentialValuesImpl::initialize(A);
9174     if (isAtFixpoint())
9175       return;
9176 
9177     Value &V = getAssociatedValue();
9178 
9179     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9180       unionAssumed(C->getValue());
9181       indicateOptimisticFixpoint();
9182       return;
9183     }
9184 
9185     if (isa<UndefValue>(&V)) {
9186       unionAssumedWithUndef();
9187       indicateOptimisticFixpoint();
9188       return;
9189     }
9190   }
9191 
9192   /// See AbstractAttribute::updateImpl(...).
9193   ChangeStatus updateImpl(Attributor &A) override {
9194     Value &V = getAssociatedValue();
9195     auto AssumedBefore = getAssumed();
9196     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9197                                              DepClassTy::REQUIRED);
9198     const auto &S = AA.getAssumed();
9199     unionAssumed(S);
9200     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9201                                          : ChangeStatus::CHANGED;
9202   }
9203 
9204   /// See AbstractAttribute::trackStatistics()
9205   void trackStatistics() const override {
9206     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9207   }
9208 };
9209 
9210 /// ------------------------ NoUndef Attribute ---------------------------------
9211 struct AANoUndefImpl : AANoUndef {
9212   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9213 
9214   /// See AbstractAttribute::initialize(...).
9215   void initialize(Attributor &A) override {
9216     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9217       indicateOptimisticFixpoint();
9218       return;
9219     }
9220     Value &V = getAssociatedValue();
9221     if (isa<UndefValue>(V))
9222       indicatePessimisticFixpoint();
9223     else if (isa<FreezeInst>(V))
9224       indicateOptimisticFixpoint();
9225     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9226              isGuaranteedNotToBeUndefOrPoison(&V))
9227       indicateOptimisticFixpoint();
9228     else
9229       AANoUndef::initialize(A);
9230   }
9231 
9232   /// See followUsesInMBEC
9233   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9234                        AANoUndef::StateType &State) {
9235     const Value *UseV = U->get();
9236     const DominatorTree *DT = nullptr;
9237     AssumptionCache *AC = nullptr;
9238     InformationCache &InfoCache = A.getInfoCache();
9239     if (Function *F = getAnchorScope()) {
9240       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9241       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9242     }
9243     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9244     bool TrackUse = false;
9245     // Track use for instructions which must produce undef or poison bits when
9246     // at least one operand contains such bits.
9247     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9248       TrackUse = true;
9249     return TrackUse;
9250   }
9251 
9252   /// See AbstractAttribute::getAsStr().
9253   const std::string getAsStr() const override {
9254     return getAssumed() ? "noundef" : "may-undef-or-poison";
9255   }
9256 
9257   ChangeStatus manifest(Attributor &A) override {
9258     // We don't manifest noundef attribute for dead positions because the
9259     // associated values with dead positions would be replaced with undef
9260     // values.
9261     bool UsedAssumedInformation = false;
9262     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9263                         UsedAssumedInformation))
9264       return ChangeStatus::UNCHANGED;
9265     // A position whose simplified value does not have any value is
9266     // considered to be dead. We don't manifest noundef in such positions for
9267     // the same reason above.
9268     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9269              .hasValue())
9270       return ChangeStatus::UNCHANGED;
9271     return AANoUndef::manifest(A);
9272   }
9273 };
9274 
9275 struct AANoUndefFloating : public AANoUndefImpl {
9276   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9277       : AANoUndefImpl(IRP, A) {}
9278 
9279   /// See AbstractAttribute::initialize(...).
9280   void initialize(Attributor &A) override {
9281     AANoUndefImpl::initialize(A);
9282     if (!getState().isAtFixpoint())
9283       if (Instruction *CtxI = getCtxI())
9284         followUsesInMBEC(*this, A, getState(), *CtxI);
9285   }
9286 
9287   /// See AbstractAttribute::updateImpl(...).
9288   ChangeStatus updateImpl(Attributor &A) override {
9289     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9290                             AANoUndef::StateType &T, bool Stripped) -> bool {
9291       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9292                                              DepClassTy::REQUIRED);
9293       if (!Stripped && this == &AA) {
9294         T.indicatePessimisticFixpoint();
9295       } else {
9296         const AANoUndef::StateType &S =
9297             static_cast<const AANoUndef::StateType &>(AA.getState());
9298         T ^= S;
9299       }
9300       return T.isValidState();
9301     };
9302 
9303     StateType T;
9304     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9305                                           VisitValueCB, getCtxI()))
9306       return indicatePessimisticFixpoint();
9307 
9308     return clampStateAndIndicateChange(getState(), T);
9309   }
9310 
9311   /// See AbstractAttribute::trackStatistics()
9312   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9313 };
9314 
9315 struct AANoUndefReturned final
9316     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9317   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9318       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9319 
9320   /// See AbstractAttribute::trackStatistics()
9321   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9322 };
9323 
9324 struct AANoUndefArgument final
9325     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9326   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9327       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9328 
9329   /// See AbstractAttribute::trackStatistics()
9330   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9331 };
9332 
9333 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9334   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9335       : AANoUndefFloating(IRP, A) {}
9336 
9337   /// See AbstractAttribute::trackStatistics()
9338   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9339 };
9340 
9341 struct AANoUndefCallSiteReturned final
9342     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9343   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9344       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9345 
9346   /// See AbstractAttribute::trackStatistics()
9347   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9348 };
9349 
9350 struct AACallEdgesFunction : public AACallEdges {
9351   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9352       : AACallEdges(IRP, A) {}
9353 
9354   /// See AbstractAttribute::updateImpl(...).
9355   ChangeStatus updateImpl(Attributor &A) override {
9356     ChangeStatus Change = ChangeStatus::UNCHANGED;
9357     bool OldHasUnknownCallee = HasUnknownCallee;
9358     bool OldHasUnknownCalleeNonAsm = HasUnknownCalleeNonAsm;
9359 
9360     auto AddCalledFunction = [&](Function *Fn) {
9361       if (CalledFunctions.insert(Fn)) {
9362         Change = ChangeStatus::CHANGED;
9363         LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9364                           << "\n");
9365       }
9366     };
9367 
9368     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9369                           bool Stripped) -> bool {
9370       if (Function *Fn = dyn_cast<Function>(&V)) {
9371         AddCalledFunction(Fn);
9372       } else {
9373         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9374         HasUnknown = true;
9375         HasUnknownCalleeNonAsm = true;
9376       }
9377 
9378       // Explore all values.
9379       return true;
9380     };
9381 
9382     // Process any value that we might call.
9383     auto ProcessCalledOperand = [&](Value *V, Instruction *Ctx) {
9384       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9385                                        HasUnknownCallee, VisitValue, nullptr,
9386                                        false)) {
9387         // If we haven't gone through all values, assume that there are unknown
9388         // callees.
9389         HasUnknownCallee = true;
9390         HasUnknownCalleeNonAsm = true;
9391       }
9392     };
9393 
9394     auto ProcessCallInst = [&](Instruction &Inst) {
9395       CallBase &CB = static_cast<CallBase &>(Inst);
9396       if (CB.isInlineAsm()) {
9397         HasUnknownCallee = true;
9398         return true;
9399       }
9400 
9401       // Process callee metadata if available.
9402       if (auto *MD = Inst.getMetadata(LLVMContext::MD_callees)) {
9403         for (auto &Op : MD->operands()) {
9404           Function *Callee = mdconst::extract_or_null<Function>(Op);
9405           if (Callee)
9406             AddCalledFunction(Callee);
9407         }
9408         // Callees metadata grantees that the called function is one of its
9409         // operands, So we are done.
9410         return true;
9411       }
9412 
9413       // The most simple case.
9414       ProcessCalledOperand(CB.getCalledOperand(), &Inst);
9415 
9416       // Process callback functions.
9417       SmallVector<const Use *, 4u> CallbackUses;
9418       AbstractCallSite::getCallbackUses(CB, CallbackUses);
9419       for (const Use *U : CallbackUses)
9420         ProcessCalledOperand(U->get(), &Inst);
9421 
9422       return true;
9423     };
9424 
9425     // Visit all callable instructions.
9426     bool UsedAssumedInformation = false;
9427     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9428                                            UsedAssumedInformation)) {
9429       // If we haven't looked at all call like instructions, assume that there
9430       // are unknown callees.
9431       HasUnknownCallee = true;
9432       HasUnknownCalleeNonAsm = true;
9433     }
9434 
9435     // Track changes.
9436     if (OldHasUnknownCallee != HasUnknownCallee ||
9437         OldHasUnknownCalleeNonAsm != HasUnknownCalleeNonAsm)
9438       Change = ChangeStatus::CHANGED;
9439 
9440     return Change;
9441   }
9442 
9443   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9444     return CalledFunctions;
9445   };
9446 
9447   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9448 
9449   virtual bool hasNonAsmUnknownCallee() const override {
9450     return HasUnknownCalleeNonAsm;
9451   }
9452 
9453   const std::string getAsStr() const override {
9454     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9455            std::to_string(CalledFunctions.size()) + "]";
9456   }
9457 
9458   void trackStatistics() const override {}
9459 
9460   /// Optimistic set of functions that might be called by this function.
9461   SetVector<Function *> CalledFunctions;
9462 
9463   /// Is there any call with a unknown callee.
9464   bool HasUnknownCallee = false;
9465 
9466   /// Is there any call with a unknown callee, excluding any inline asm.
9467   bool HasUnknownCalleeNonAsm = false;
9468 };
9469 
9470 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9471   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9472       : AAFunctionReachability(IRP, A) {}
9473 
9474   bool canReach(Attributor &A, Function *Fn) const override {
9475     // Assume that we can reach any function if we can reach a call with
9476     // unknown callee.
9477     if (CanReachUnknownCallee)
9478       return true;
9479 
9480     if (ReachableQueries.count(Fn))
9481       return true;
9482 
9483     if (UnreachableQueries.count(Fn))
9484       return false;
9485 
9486     const AACallEdges &AAEdges =
9487         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9488 
9489     const SetVector<Function *> &Edges = AAEdges.getOptimisticEdges();
9490     bool Result = checkIfReachable(A, Edges, Fn);
9491 
9492     // Attributor returns attributes as const, so this function has to be
9493     // const for users of this attribute to use it without having to do
9494     // a const_cast.
9495     // This is a hack for us to be able to cache queries.
9496     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9497 
9498     if (Result)
9499       NonConstThis->ReachableQueries.insert(Fn);
9500     else
9501       NonConstThis->UnreachableQueries.insert(Fn);
9502 
9503     return Result;
9504   }
9505 
9506   /// See AbstractAttribute::updateImpl(...).
9507   ChangeStatus updateImpl(Attributor &A) override {
9508     if (CanReachUnknownCallee)
9509       return ChangeStatus::UNCHANGED;
9510 
9511     const AACallEdges &AAEdges =
9512         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9513     const SetVector<Function *> &Edges = AAEdges.getOptimisticEdges();
9514     ChangeStatus Change = ChangeStatus::UNCHANGED;
9515 
9516     if (AAEdges.hasUnknownCallee()) {
9517       bool OldCanReachUnknown = CanReachUnknownCallee;
9518       CanReachUnknownCallee = true;
9519       return OldCanReachUnknown ? ChangeStatus::UNCHANGED
9520                                 : ChangeStatus::CHANGED;
9521     }
9522 
9523     // Check if any of the unreachable functions become reachable.
9524     for (auto Current = UnreachableQueries.begin();
9525          Current != UnreachableQueries.end();) {
9526       if (!checkIfReachable(A, Edges, *Current)) {
9527         Current++;
9528         continue;
9529       }
9530       ReachableQueries.insert(*Current);
9531       UnreachableQueries.erase(*Current++);
9532       Change = ChangeStatus::CHANGED;
9533     }
9534 
9535     return Change;
9536   }
9537 
9538   const std::string getAsStr() const override {
9539     size_t QueryCount = ReachableQueries.size() + UnreachableQueries.size();
9540 
9541     return "FunctionReachability [" + std::to_string(ReachableQueries.size()) +
9542            "," + std::to_string(QueryCount) + "]";
9543   }
9544 
9545   void trackStatistics() const override {}
9546 
9547 private:
9548   bool canReachUnknownCallee() const override { return CanReachUnknownCallee; }
9549 
9550   bool checkIfReachable(Attributor &A, const SetVector<Function *> &Edges,
9551                         Function *Fn) const {
9552     if (Edges.count(Fn))
9553       return true;
9554 
9555     for (Function *Edge : Edges) {
9556       // We don't need a dependency if the result is reachable.
9557       const AAFunctionReachability &EdgeReachability =
9558           A.getAAFor<AAFunctionReachability>(*this, IRPosition::function(*Edge),
9559                                              DepClassTy::NONE);
9560 
9561       if (EdgeReachability.canReach(A, Fn))
9562         return true;
9563     }
9564     for (Function *Fn : Edges)
9565       A.getAAFor<AAFunctionReachability>(*this, IRPosition::function(*Fn),
9566                                          DepClassTy::REQUIRED);
9567 
9568     return false;
9569   }
9570 
9571   /// Set of functions that we know for sure is reachable.
9572   SmallPtrSet<Function *, 8> ReachableQueries;
9573 
9574   /// Set of functions that are unreachable, but might become reachable.
9575   SmallPtrSet<Function *, 8> UnreachableQueries;
9576 
9577   /// If we can reach a function with a call to a unknown function we assume
9578   /// that we can reach any function.
9579   bool CanReachUnknownCallee = false;
9580 };
9581 
9582 } // namespace
9583 
9584 AACallGraphNode *AACallEdgeIterator::operator*() const {
9585   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
9586       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
9587 }
9588 
9589 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
9590 
9591 const char AAReturnedValues::ID = 0;
9592 const char AANoUnwind::ID = 0;
9593 const char AANoSync::ID = 0;
9594 const char AANoFree::ID = 0;
9595 const char AANonNull::ID = 0;
9596 const char AANoRecurse::ID = 0;
9597 const char AAWillReturn::ID = 0;
9598 const char AAUndefinedBehavior::ID = 0;
9599 const char AANoAlias::ID = 0;
9600 const char AAReachability::ID = 0;
9601 const char AANoReturn::ID = 0;
9602 const char AAIsDead::ID = 0;
9603 const char AADereferenceable::ID = 0;
9604 const char AAAlign::ID = 0;
9605 const char AANoCapture::ID = 0;
9606 const char AAValueSimplify::ID = 0;
9607 const char AAHeapToStack::ID = 0;
9608 const char AAPrivatizablePtr::ID = 0;
9609 const char AAMemoryBehavior::ID = 0;
9610 const char AAMemoryLocation::ID = 0;
9611 const char AAValueConstantRange::ID = 0;
9612 const char AAPotentialValues::ID = 0;
9613 const char AANoUndef::ID = 0;
9614 const char AACallEdges::ID = 0;
9615 const char AAFunctionReachability::ID = 0;
9616 const char AAPointerInfo::ID = 0;
9617 
9618 // Macro magic to create the static generator function for attributes that
9619 // follow the naming scheme.
9620 
9621 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
9622   case IRPosition::PK:                                                         \
9623     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
9624 
9625 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
9626   case IRPosition::PK:                                                         \
9627     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
9628     ++NumAAs;                                                                  \
9629     break;
9630 
9631 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
9632   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9633     CLASS *AA = nullptr;                                                       \
9634     switch (IRP.getPositionKind()) {                                           \
9635       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9636       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9637       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9638       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9639       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9640       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9641       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9642       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9643     }                                                                          \
9644     return *AA;                                                                \
9645   }
9646 
9647 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
9648   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9649     CLASS *AA = nullptr;                                                       \
9650     switch (IRP.getPositionKind()) {                                           \
9651       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9652       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
9653       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9654       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9655       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9656       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9657       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9658       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9659     }                                                                          \
9660     return *AA;                                                                \
9661   }
9662 
9663 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
9664   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9665     CLASS *AA = nullptr;                                                       \
9666     switch (IRP.getPositionKind()) {                                           \
9667       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9668       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9669       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9670       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9671       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9672       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9673       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9674       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9675     }                                                                          \
9676     return *AA;                                                                \
9677   }
9678 
9679 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
9680   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9681     CLASS *AA = nullptr;                                                       \
9682     switch (IRP.getPositionKind()) {                                           \
9683       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9684       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9685       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9686       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9687       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9688       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9689       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9690       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9691     }                                                                          \
9692     return *AA;                                                                \
9693   }
9694 
9695 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
9696   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9697     CLASS *AA = nullptr;                                                       \
9698     switch (IRP.getPositionKind()) {                                           \
9699       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9700       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9701       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9702       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9703       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9704       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9705       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9706       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9707     }                                                                          \
9708     return *AA;                                                                \
9709   }
9710 
9711 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
9712 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
9713 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
9714 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
9715 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
9716 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
9717 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
9718 
9719 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
9720 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
9721 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
9722 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
9723 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
9724 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
9725 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
9726 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
9727 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
9728 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
9729 
9730 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
9731 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
9732 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
9733 
9734 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
9735 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
9736 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
9737 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
9738 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
9739 
9740 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
9741 
9742 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
9743 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
9744 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
9745 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
9746 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
9747 #undef SWITCH_PK_CREATE
9748 #undef SWITCH_PK_INV
9749