1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/SCCIterator.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/AssumeBundleQueries.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LazyValueInfo.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
28 #include "llvm/Analysis/ScalarEvolution.h"
29 #include "llvm/Analysis/TargetTransformInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/IR/Instruction.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/NoFolder.h"
37 #include "llvm/Support/Alignment.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/ErrorHandling.h"
41 #include "llvm/Support/FileSystem.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
44 #include "llvm/Transforms/Utils/Local.h"
45 #include <cassert>
46 
47 using namespace llvm;
48 
49 #define DEBUG_TYPE "attributor"
50 
51 static cl::opt<bool> ManifestInternal(
52     "attributor-manifest-internal", cl::Hidden,
53     cl::desc("Manifest Attributor internal string attributes."),
54     cl::init(false));
55 
56 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
57                                        cl::Hidden);
58 
59 template <>
60 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
61 
62 static cl::opt<unsigned, true> MaxPotentialValues(
63     "attributor-max-potential-values", cl::Hidden,
64     cl::desc("Maximum number of potential values to be "
65              "tracked for each position."),
66     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
67     cl::init(7));
68 
69 STATISTIC(NumAAs, "Number of abstract attributes created");
70 
71 // Some helper macros to deal with statistics tracking.
72 //
73 // Usage:
74 // For simple IR attribute tracking overload trackStatistics in the abstract
75 // attribute and choose the right STATS_DECLTRACK_********* macro,
76 // e.g.,:
77 //  void trackStatistics() const override {
78 //    STATS_DECLTRACK_ARG_ATTR(returned)
79 //  }
80 // If there is a single "increment" side one can use the macro
81 // STATS_DECLTRACK with a custom message. If there are multiple increment
82 // sides, STATS_DECL and STATS_TRACK can also be used separately.
83 //
84 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
85   ("Number of " #TYPE " marked '" #NAME "'")
86 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
87 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
88 #define STATS_DECL(NAME, TYPE, MSG)                                            \
89   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
90 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
91 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
92   {                                                                            \
93     STATS_DECL(NAME, TYPE, MSG)                                                \
94     STATS_TRACK(NAME, TYPE)                                                    \
95   }
96 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
97   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
98 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
99   STATS_DECLTRACK(NAME, CSArguments,                                           \
100                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
101 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
102   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
103 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
104   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
105 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
106   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
107                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
108 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
109   STATS_DECLTRACK(NAME, CSReturn,                                              \
110                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
111 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
112   STATS_DECLTRACK(NAME, Floating,                                              \
113                   ("Number of floating values known to be '" #NAME "'"))
114 
115 // Specialization of the operator<< for abstract attributes subclasses. This
116 // disambiguates situations where multiple operators are applicable.
117 namespace llvm {
118 #define PIPE_OPERATOR(CLASS)                                                   \
119   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
120     return OS << static_cast<const AbstractAttribute &>(AA);                   \
121   }
122 
123 PIPE_OPERATOR(AAIsDead)
124 PIPE_OPERATOR(AANoUnwind)
125 PIPE_OPERATOR(AANoSync)
126 PIPE_OPERATOR(AANoRecurse)
127 PIPE_OPERATOR(AAWillReturn)
128 PIPE_OPERATOR(AANoReturn)
129 PIPE_OPERATOR(AAReturnedValues)
130 PIPE_OPERATOR(AANonNull)
131 PIPE_OPERATOR(AANoAlias)
132 PIPE_OPERATOR(AADereferenceable)
133 PIPE_OPERATOR(AAAlign)
134 PIPE_OPERATOR(AANoCapture)
135 PIPE_OPERATOR(AAValueSimplify)
136 PIPE_OPERATOR(AANoFree)
137 PIPE_OPERATOR(AAHeapToStack)
138 PIPE_OPERATOR(AAReachability)
139 PIPE_OPERATOR(AAMemoryBehavior)
140 PIPE_OPERATOR(AAMemoryLocation)
141 PIPE_OPERATOR(AAValueConstantRange)
142 PIPE_OPERATOR(AAPrivatizablePtr)
143 PIPE_OPERATOR(AAUndefinedBehavior)
144 PIPE_OPERATOR(AAPotentialValues)
145 PIPE_OPERATOR(AANoUndef)
146 PIPE_OPERATOR(AACallEdges)
147 PIPE_OPERATOR(AAFunctionReachability)
148 PIPE_OPERATOR(AAPointerInfo)
149 
150 #undef PIPE_OPERATOR
151 
152 template <>
153 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
154                                                      const DerefState &R) {
155   ChangeStatus CS0 =
156       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
157   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
158   return CS0 | CS1;
159 }
160 
161 } // namespace llvm
162 
163 /// Get pointer operand of memory accessing instruction. If \p I is
164 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
165 /// is set to false and the instruction is volatile, return nullptr.
166 static const Value *getPointerOperand(const Instruction *I,
167                                       bool AllowVolatile) {
168   if (!AllowVolatile && I->isVolatile())
169     return nullptr;
170 
171   if (auto *LI = dyn_cast<LoadInst>(I)) {
172     return LI->getPointerOperand();
173   }
174 
175   if (auto *SI = dyn_cast<StoreInst>(I)) {
176     return SI->getPointerOperand();
177   }
178 
179   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
180     return CXI->getPointerOperand();
181   }
182 
183   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
184     return RMWI->getPointerOperand();
185   }
186 
187   return nullptr;
188 }
189 
190 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
191 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
192 /// getelement pointer instructions that traverse the natural type of \p Ptr if
193 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
194 /// through a cast to i8*.
195 ///
196 /// TODO: This could probably live somewhere more prominantly if it doesn't
197 ///       already exist.
198 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
199                                int64_t Offset, IRBuilder<NoFolder> &IRB,
200                                const DataLayout &DL) {
201   assert(Offset >= 0 && "Negative offset not supported yet!");
202   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
203                     << "-bytes as " << *ResTy << "\n");
204 
205   if (Offset) {
206     Type *Ty = PtrElemTy;
207     APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
208     SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
209 
210     SmallVector<Value *, 4> ValIndices;
211     std::string GEPName = Ptr->getName().str();
212     for (const APInt &Index : IntIndices) {
213       ValIndices.push_back(IRB.getInt(Index));
214       GEPName += "." + std::to_string(Index.getZExtValue());
215     }
216 
217     // Create a GEP for the indices collected above.
218     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
219 
220     // If an offset is left we use byte-wise adjustment.
221     if (IntOffset != 0) {
222       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
223       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
224                           GEPName + ".b" + Twine(IntOffset.getZExtValue()));
225     }
226   }
227 
228   // Ensure the result has the requested type.
229   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
230 
231   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
232   return Ptr;
233 }
234 
235 /// Recursively visit all values that might become \p IRP at some point. This
236 /// will be done by looking through cast instructions, selects, phis, and calls
237 /// with the "returned" attribute. Once we cannot look through the value any
238 /// further, the callback \p VisitValueCB is invoked and passed the current
239 /// value, the \p State, and a flag to indicate if we stripped anything.
240 /// Stripped means that we unpacked the value associated with \p IRP at least
241 /// once. Note that the value used for the callback may still be the value
242 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
243 /// we will never visit more values than specified by \p MaxValues.
244 template <typename StateTy>
245 static bool genericValueTraversal(
246     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
247     StateTy &State,
248     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
249         VisitValueCB,
250     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
251     function_ref<Value *(Value *)> StripCB = nullptr) {
252 
253   const AAIsDead *LivenessAA = nullptr;
254   if (IRP.getAnchorScope())
255     LivenessAA = &A.getAAFor<AAIsDead>(
256         QueryingAA,
257         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
258         DepClassTy::NONE);
259   bool AnyDead = false;
260 
261   Value *InitialV = &IRP.getAssociatedValue();
262   using Item = std::pair<Value *, const Instruction *>;
263   SmallSet<Item, 16> Visited;
264   SmallVector<Item, 16> Worklist;
265   Worklist.push_back({InitialV, CtxI});
266 
267   int Iteration = 0;
268   do {
269     Item I = Worklist.pop_back_val();
270     Value *V = I.first;
271     CtxI = I.second;
272     if (StripCB)
273       V = StripCB(V);
274 
275     // Check if we should process the current value. To prevent endless
276     // recursion keep a record of the values we followed!
277     if (!Visited.insert(I).second)
278       continue;
279 
280     // Make sure we limit the compile time for complex expressions.
281     if (Iteration++ >= MaxValues)
282       return false;
283 
284     // Explicitly look through calls with a "returned" attribute if we do
285     // not have a pointer as stripPointerCasts only works on them.
286     Value *NewV = nullptr;
287     if (V->getType()->isPointerTy()) {
288       NewV = V->stripPointerCasts();
289     } else {
290       auto *CB = dyn_cast<CallBase>(V);
291       if (CB && CB->getCalledFunction()) {
292         for (Argument &Arg : CB->getCalledFunction()->args())
293           if (Arg.hasReturnedAttr()) {
294             NewV = CB->getArgOperand(Arg.getArgNo());
295             break;
296           }
297       }
298     }
299     if (NewV && NewV != V) {
300       Worklist.push_back({NewV, CtxI});
301       continue;
302     }
303 
304     // Look through select instructions, visit assumed potential values.
305     if (auto *SI = dyn_cast<SelectInst>(V)) {
306       bool UsedAssumedInformation = false;
307       Optional<Constant *> C = A.getAssumedConstant(
308           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
309       bool NoValueYet = !C.hasValue();
310       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
311         continue;
312       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
313         if (CI->isZero())
314           Worklist.push_back({SI->getFalseValue(), CtxI});
315         else
316           Worklist.push_back({SI->getTrueValue(), CtxI});
317         continue;
318       }
319       // We could not simplify the condition, assume both values.(
320       Worklist.push_back({SI->getTrueValue(), CtxI});
321       Worklist.push_back({SI->getFalseValue(), CtxI});
322       continue;
323     }
324 
325     // Look through phi nodes, visit all live operands.
326     if (auto *PHI = dyn_cast<PHINode>(V)) {
327       assert(LivenessAA &&
328              "Expected liveness in the presence of instructions!");
329       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
330         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
331         bool UsedAssumedInformation = false;
332         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
333                             LivenessAA, UsedAssumedInformation,
334                             /* CheckBBLivenessOnly */ true)) {
335           AnyDead = true;
336           continue;
337         }
338         Worklist.push_back(
339             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
340       }
341       continue;
342     }
343 
344     if (UseValueSimplify && !isa<Constant>(V)) {
345       bool UsedAssumedInformation = false;
346       Optional<Value *> SimpleV =
347           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
348       if (!SimpleV.hasValue())
349         continue;
350       if (!SimpleV.getValue())
351         return false;
352       Value *NewV = SimpleV.getValue();
353       if (NewV != V) {
354         Worklist.push_back({NewV, CtxI});
355         continue;
356       }
357     }
358 
359     // Once a leaf is reached we inform the user through the callback.
360     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
361       return false;
362   } while (!Worklist.empty());
363 
364   // If we actually used liveness information so we have to record a dependence.
365   if (AnyDead)
366     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
367 
368   // All values have been visited.
369   return true;
370 }
371 
372 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
373                                      SmallVectorImpl<Value *> &Objects,
374                                      const AbstractAttribute &QueryingAA,
375                                      const Instruction *CtxI) {
376   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
377   SmallPtrSet<Value *, 8> SeenObjects;
378   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
379                                      SmallVectorImpl<Value *> &Objects,
380                                      bool) -> bool {
381     if (SeenObjects.insert(&Val).second)
382       Objects.push_back(&Val);
383     return true;
384   };
385   if (!genericValueTraversal<decltype(Objects)>(
386           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
387           true, 32, StripCB))
388     return false;
389   return true;
390 }
391 
392 const Value *stripAndAccumulateMinimalOffsets(
393     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
394     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
395     bool UseAssumed = false) {
396 
397   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
398     const IRPosition &Pos = IRPosition::value(V);
399     // Only track dependence if we are going to use the assumed info.
400     const AAValueConstantRange &ValueConstantRangeAA =
401         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
402                                          UseAssumed ? DepClassTy::OPTIONAL
403                                                     : DepClassTy::NONE);
404     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
405                                      : ValueConstantRangeAA.getKnown();
406     // We can only use the lower part of the range because the upper part can
407     // be higher than what the value can really be.
408     ROffset = Range.getSignedMin();
409     return true;
410   };
411 
412   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
413                                                 AttributorAnalysis);
414 }
415 
416 static const Value *getMinimalBaseOfAccsesPointerOperand(
417     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
418     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
419   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
420   if (!Ptr)
421     return nullptr;
422   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
423   const Value *Base = stripAndAccumulateMinimalOffsets(
424       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
425 
426   BytesOffset = OffsetAPInt.getSExtValue();
427   return Base;
428 }
429 
430 static const Value *
431 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
432                                      const DataLayout &DL,
433                                      bool AllowNonInbounds = false) {
434   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
435   if (!Ptr)
436     return nullptr;
437 
438   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
439                                           AllowNonInbounds);
440 }
441 
442 /// Clamp the information known for all returned values of a function
443 /// (identified by \p QueryingAA) into \p S.
444 template <typename AAType, typename StateType = typename AAType::StateType>
445 static void clampReturnedValueStates(
446     Attributor &A, const AAType &QueryingAA, StateType &S,
447     const IRPosition::CallBaseContext *CBContext = nullptr) {
448   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
449                     << QueryingAA << " into " << S << "\n");
450 
451   assert((QueryingAA.getIRPosition().getPositionKind() ==
452               IRPosition::IRP_RETURNED ||
453           QueryingAA.getIRPosition().getPositionKind() ==
454               IRPosition::IRP_CALL_SITE_RETURNED) &&
455          "Can only clamp returned value states for a function returned or call "
456          "site returned position!");
457 
458   // Use an optional state as there might not be any return values and we want
459   // to join (IntegerState::operator&) the state of all there are.
460   Optional<StateType> T;
461 
462   // Callback for each possibly returned value.
463   auto CheckReturnValue = [&](Value &RV) -> bool {
464     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
465     const AAType &AA =
466         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
467     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
468                       << " @ " << RVPos << "\n");
469     const StateType &AAS = AA.getState();
470     if (T.hasValue())
471       *T &= AAS;
472     else
473       T = AAS;
474     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
475                       << "\n");
476     return T->isValidState();
477   };
478 
479   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
480     S.indicatePessimisticFixpoint();
481   else if (T.hasValue())
482     S ^= *T;
483 }
484 
485 /// Helper class for generic deduction: return value -> returned position.
486 template <typename AAType, typename BaseType,
487           typename StateType = typename BaseType::StateType,
488           bool PropagateCallBaseContext = false>
489 struct AAReturnedFromReturnedValues : public BaseType {
490   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
491       : BaseType(IRP, A) {}
492 
493   /// See AbstractAttribute::updateImpl(...).
494   ChangeStatus updateImpl(Attributor &A) override {
495     StateType S(StateType::getBestState(this->getState()));
496     clampReturnedValueStates<AAType, StateType>(
497         A, *this, S,
498         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
499     // TODO: If we know we visited all returned values, thus no are assumed
500     // dead, we can take the known information from the state T.
501     return clampStateAndIndicateChange<StateType>(this->getState(), S);
502   }
503 };
504 
505 /// Clamp the information known at all call sites for a given argument
506 /// (identified by \p QueryingAA) into \p S.
507 template <typename AAType, typename StateType = typename AAType::StateType>
508 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
509                                         StateType &S) {
510   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
511                     << QueryingAA << " into " << S << "\n");
512 
513   assert(QueryingAA.getIRPosition().getPositionKind() ==
514              IRPosition::IRP_ARGUMENT &&
515          "Can only clamp call site argument states for an argument position!");
516 
517   // Use an optional state as there might not be any return values and we want
518   // to join (IntegerState::operator&) the state of all there are.
519   Optional<StateType> T;
520 
521   // The argument number which is also the call site argument number.
522   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
523 
524   auto CallSiteCheck = [&](AbstractCallSite ACS) {
525     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
526     // Check if a coresponding argument was found or if it is on not associated
527     // (which can happen for callback calls).
528     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
529       return false;
530 
531     const AAType &AA =
532         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
533     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
534                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
535     const StateType &AAS = AA.getState();
536     if (T.hasValue())
537       *T &= AAS;
538     else
539       T = AAS;
540     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
541                       << "\n");
542     return T->isValidState();
543   };
544 
545   bool AllCallSitesKnown;
546   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
547                               AllCallSitesKnown))
548     S.indicatePessimisticFixpoint();
549   else if (T.hasValue())
550     S ^= *T;
551 }
552 
553 /// This function is the bridge between argument position and the call base
554 /// context.
555 template <typename AAType, typename BaseType,
556           typename StateType = typename AAType::StateType>
557 bool getArgumentStateFromCallBaseContext(Attributor &A,
558                                          BaseType &QueryingAttribute,
559                                          IRPosition &Pos, StateType &State) {
560   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
561          "Expected an 'argument' position !");
562   const CallBase *CBContext = Pos.getCallBaseContext();
563   if (!CBContext)
564     return false;
565 
566   int ArgNo = Pos.getCallSiteArgNo();
567   assert(ArgNo >= 0 && "Invalid Arg No!");
568 
569   const auto &AA = A.getAAFor<AAType>(
570       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
571       DepClassTy::REQUIRED);
572   const StateType &CBArgumentState =
573       static_cast<const StateType &>(AA.getState());
574 
575   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
576                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
577                     << "\n");
578 
579   // NOTE: If we want to do call site grouping it should happen here.
580   State ^= CBArgumentState;
581   return true;
582 }
583 
584 /// Helper class for generic deduction: call site argument -> argument position.
585 template <typename AAType, typename BaseType,
586           typename StateType = typename AAType::StateType,
587           bool BridgeCallBaseContext = false>
588 struct AAArgumentFromCallSiteArguments : public BaseType {
589   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
590       : BaseType(IRP, A) {}
591 
592   /// See AbstractAttribute::updateImpl(...).
593   ChangeStatus updateImpl(Attributor &A) override {
594     StateType S = StateType::getBestState(this->getState());
595 
596     if (BridgeCallBaseContext) {
597       bool Success =
598           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
599               A, *this, this->getIRPosition(), S);
600       if (Success)
601         return clampStateAndIndicateChange<StateType>(this->getState(), S);
602     }
603     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
604 
605     // TODO: If we know we visited all incoming values, thus no are assumed
606     // dead, we can take the known information from the state T.
607     return clampStateAndIndicateChange<StateType>(this->getState(), S);
608   }
609 };
610 
611 /// Helper class for generic replication: function returned -> cs returned.
612 template <typename AAType, typename BaseType,
613           typename StateType = typename BaseType::StateType,
614           bool IntroduceCallBaseContext = false>
615 struct AACallSiteReturnedFromReturned : public BaseType {
616   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
617       : BaseType(IRP, A) {}
618 
619   /// See AbstractAttribute::updateImpl(...).
620   ChangeStatus updateImpl(Attributor &A) override {
621     assert(this->getIRPosition().getPositionKind() ==
622                IRPosition::IRP_CALL_SITE_RETURNED &&
623            "Can only wrap function returned positions for call site returned "
624            "positions!");
625     auto &S = this->getState();
626 
627     const Function *AssociatedFunction =
628         this->getIRPosition().getAssociatedFunction();
629     if (!AssociatedFunction)
630       return S.indicatePessimisticFixpoint();
631 
632     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
633     if (IntroduceCallBaseContext)
634       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
635                         << CBContext << "\n");
636 
637     IRPosition FnPos = IRPosition::returned(
638         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
639     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
640     return clampStateAndIndicateChange(S, AA.getState());
641   }
642 };
643 
644 /// Helper function to accumulate uses.
645 template <class AAType, typename StateType = typename AAType::StateType>
646 static void followUsesInContext(AAType &AA, Attributor &A,
647                                 MustBeExecutedContextExplorer &Explorer,
648                                 const Instruction *CtxI,
649                                 SetVector<const Use *> &Uses,
650                                 StateType &State) {
651   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
652   for (unsigned u = 0; u < Uses.size(); ++u) {
653     const Use *U = Uses[u];
654     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
655       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
656       if (Found && AA.followUseInMBEC(A, U, UserI, State))
657         for (const Use &Us : UserI->uses())
658           Uses.insert(&Us);
659     }
660   }
661 }
662 
663 /// Use the must-be-executed-context around \p I to add information into \p S.
664 /// The AAType class is required to have `followUseInMBEC` method with the
665 /// following signature and behaviour:
666 ///
667 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
668 /// U - Underlying use.
669 /// I - The user of the \p U.
670 /// Returns true if the value should be tracked transitively.
671 ///
672 template <class AAType, typename StateType = typename AAType::StateType>
673 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
674                              Instruction &CtxI) {
675 
676   // Container for (transitive) uses of the associated value.
677   SetVector<const Use *> Uses;
678   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
679     Uses.insert(&U);
680 
681   MustBeExecutedContextExplorer &Explorer =
682       A.getInfoCache().getMustBeExecutedContextExplorer();
683 
684   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
685 
686   if (S.isAtFixpoint())
687     return;
688 
689   SmallVector<const BranchInst *, 4> BrInsts;
690   auto Pred = [&](const Instruction *I) {
691     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
692       if (Br->isConditional())
693         BrInsts.push_back(Br);
694     return true;
695   };
696 
697   // Here, accumulate conditional branch instructions in the context. We
698   // explore the child paths and collect the known states. The disjunction of
699   // those states can be merged to its own state. Let ParentState_i be a state
700   // to indicate the known information for an i-th branch instruction in the
701   // context. ChildStates are created for its successors respectively.
702   //
703   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
704   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
705   //      ...
706   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
707   //
708   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
709   //
710   // FIXME: Currently, recursive branches are not handled. For example, we
711   // can't deduce that ptr must be dereferenced in below function.
712   //
713   // void f(int a, int c, int *ptr) {
714   //    if(a)
715   //      if (b) {
716   //        *ptr = 0;
717   //      } else {
718   //        *ptr = 1;
719   //      }
720   //    else {
721   //      if (b) {
722   //        *ptr = 0;
723   //      } else {
724   //        *ptr = 1;
725   //      }
726   //    }
727   // }
728 
729   Explorer.checkForAllContext(&CtxI, Pred);
730   for (const BranchInst *Br : BrInsts) {
731     StateType ParentState;
732 
733     // The known state of the parent state is a conjunction of children's
734     // known states so it is initialized with a best state.
735     ParentState.indicateOptimisticFixpoint();
736 
737     for (const BasicBlock *BB : Br->successors()) {
738       StateType ChildState;
739 
740       size_t BeforeSize = Uses.size();
741       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
742 
743       // Erase uses which only appear in the child.
744       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
745         It = Uses.erase(It);
746 
747       ParentState &= ChildState;
748     }
749 
750     // Use only known state.
751     S += ParentState;
752   }
753 }
754 
755 /// ------------------------ PointerInfo ---------------------------------------
756 
757 namespace llvm {
758 namespace AA {
759 namespace PointerInfo {
760 
761 /// An access kind description as used by AAPointerInfo.
762 struct OffsetAndSize;
763 
764 struct State;
765 
766 } // namespace PointerInfo
767 } // namespace AA
768 
769 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
770 template <>
771 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
772   using Access = AAPointerInfo::Access;
773   static inline Access getEmptyKey();
774   static inline Access getTombstoneKey();
775   static unsigned getHashValue(const Access &A);
776   static bool isEqual(const Access &LHS, const Access &RHS);
777 };
778 
779 /// Helper that allows OffsetAndSize as a key in a DenseMap.
780 template <>
781 struct DenseMapInfo<AA::PointerInfo ::OffsetAndSize>
782     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
783 
784 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
785 /// but the instruction
786 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
787   using Base = DenseMapInfo<Instruction *>;
788   using Access = AAPointerInfo::Access;
789   static inline Access getEmptyKey();
790   static inline Access getTombstoneKey();
791   static unsigned getHashValue(const Access &A);
792   static bool isEqual(const Access &LHS, const Access &RHS);
793 };
794 
795 } // namespace llvm
796 
797 /// Helper to represent an access offset and size, with logic to deal with
798 /// uncertainty and check for overlapping accesses.
799 struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
800   using BaseTy = std::pair<int64_t, int64_t>;
801   OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
802   OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
803   int64_t getOffset() const { return first; }
804   int64_t getSize() const { return second; }
805   static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }
806 
807   /// Return true if this offset and size pair might describe an address that
808   /// overlaps with \p OAS.
809   bool mayOverlap(const OffsetAndSize &OAS) const {
810     // Any unknown value and we are giving up -> overlap.
811     if (OAS.getOffset() == OffsetAndSize::Unknown ||
812         OAS.getSize() == OffsetAndSize::Unknown ||
813         getOffset() == OffsetAndSize::Unknown ||
814         getSize() == OffsetAndSize::Unknown)
815       return true;
816 
817     // Check if one offset point is in the other interval [offset, offset+size].
818     return OAS.getOffset() + OAS.getSize() > getOffset() &&
819            OAS.getOffset() < getOffset() + getSize();
820   }
821 
822   /// Constant used to represent unknown offset or sizes.
823   static constexpr int64_t Unknown = 1 << 31;
824 };
825 
826 /// Implementation of the DenseMapInfo.
827 ///
828 ///{
829 inline llvm::AccessAsInstructionInfo::Access
830 llvm::AccessAsInstructionInfo::getEmptyKey() {
831   return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
832 }
833 inline llvm::AccessAsInstructionInfo::Access
834 llvm::AccessAsInstructionInfo::getTombstoneKey() {
835   return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
836                 nullptr);
837 }
838 unsigned llvm::AccessAsInstructionInfo::getHashValue(
839     const llvm::AccessAsInstructionInfo::Access &A) {
840   return Base::getHashValue(A.getRemoteInst());
841 }
842 bool llvm::AccessAsInstructionInfo::isEqual(
843     const llvm::AccessAsInstructionInfo::Access &LHS,
844     const llvm::AccessAsInstructionInfo::Access &RHS) {
845   return LHS.getRemoteInst() == RHS.getRemoteInst();
846 }
847 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
848 llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
849   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
850                                nullptr);
851 }
852 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
853 llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
854   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
855                                nullptr);
856 }
857 
858 unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
859     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
860   return detail::combineHashValue(
861              DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
862              (A.isWrittenValueYetUndetermined()
863                   ? ~0
864                   : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
865          A.getKind();
866 }
867 
868 bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
869     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
870     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
871   return LHS == RHS;
872 }
873 ///}
874 
875 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
876 struct AA::PointerInfo::State : public AbstractState {
877 
878   /// Return the best possible representable state.
879   static State getBestState(const State &SIS) { return State(); }
880 
881   /// Return the worst possible representable state.
882   static State getWorstState(const State &SIS) {
883     State R;
884     R.indicatePessimisticFixpoint();
885     return R;
886   }
887 
888   State() {}
889   State(const State &SIS) : AccessBins(SIS.AccessBins) {}
890   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}
891 
892   const State &getAssumed() const { return *this; }
893 
894   /// See AbstractState::isValidState().
895   bool isValidState() const override { return BS.isValidState(); }
896 
897   /// See AbstractState::isAtFixpoint().
898   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
899 
900   /// See AbstractState::indicateOptimisticFixpoint().
901   ChangeStatus indicateOptimisticFixpoint() override {
902     BS.indicateOptimisticFixpoint();
903     return ChangeStatus::UNCHANGED;
904   }
905 
906   /// See AbstractState::indicatePessimisticFixpoint().
907   ChangeStatus indicatePessimisticFixpoint() override {
908     BS.indicatePessimisticFixpoint();
909     return ChangeStatus::CHANGED;
910   }
911 
912   State &operator=(const State &R) {
913     if (this == &R)
914       return *this;
915     BS = R.BS;
916     AccessBins = R.AccessBins;
917     return *this;
918   }
919 
920   State &operator=(State &&R) {
921     if (this == &R)
922       return *this;
923     std::swap(BS, R.BS);
924     std::swap(AccessBins, R.AccessBins);
925     return *this;
926   }
927 
928   bool operator==(const State &R) const {
929     if (BS != R.BS)
930       return false;
931     if (AccessBins.size() != R.AccessBins.size())
932       return false;
933     auto It = begin(), RIt = R.begin(), E = end();
934     while (It != E) {
935       if (It->getFirst() != RIt->getFirst())
936         return false;
937       auto &Accs = It->getSecond();
938       auto &RAccs = RIt->getSecond();
939       if (Accs.size() != RAccs.size())
940         return false;
941       auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
942       while (AccIt != AccE) {
943         if (*AccIt != *RAccIt)
944           return false;
945         ++AccIt;
946         ++RAccIt;
947       }
948       ++It;
949       ++RIt;
950     }
951     return true;
952   }
953   bool operator!=(const State &R) const { return !(*this == R); }
954 
955   /// We store accesses in a set with the instruction as key.
956   using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;
957 
958   /// We store all accesses in bins denoted by their offset and size.
959   using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;
960 
961   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
962   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
963 
964 protected:
965   /// The bins with all the accesses for the associated pointer.
966   DenseMap<OffsetAndSize, Accesses> AccessBins;
967 
968   /// Add a new access to the state at offset \p Offset and with size \p Size.
969   /// The access is associated with \p I, writes \p Content (if anything), and
970   /// is of kind \p Kind.
971   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
972   ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
973                          Optional<Value *> Content,
974                          AAPointerInfo::AccessKind Kind, Type *Ty,
975                          Instruction *RemoteI = nullptr,
976                          Accesses *BinPtr = nullptr) {
977     OffsetAndSize Key{Offset, Size};
978     Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
979     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
980     // Check if we have an access for this instruction in this bin, if not,
981     // simply add it.
982     auto It = Bin.find(Acc);
983     if (It == Bin.end()) {
984       Bin.insert(Acc);
985       return ChangeStatus::CHANGED;
986     }
987     // If the existing access is the same as then new one, nothing changed.
988     AAPointerInfo::Access Before = *It;
989     // The new one will be combined with the existing one.
990     *It &= Acc;
991     return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
992   }
993 
994   /// See AAPointerInfo::forallInterferingAccesses.
995   bool forallInterferingAccesses(
996       Instruction &I,
997       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
998     if (!isValidState())
999       return false;
1000     // First find the offset and size of I.
1001     OffsetAndSize OAS(-1, -1);
1002     for (auto &It : AccessBins) {
1003       for (auto &Access : It.getSecond()) {
1004         if (Access.getRemoteInst() == &I) {
1005           OAS = It.getFirst();
1006           break;
1007         }
1008       }
1009       if (OAS.getSize() != -1)
1010         break;
1011     }
1012     if (OAS.getSize() == -1)
1013       return true;
1014 
1015     // Now that we have an offset and size, find all overlapping ones and use
1016     // the callback on the accesses.
1017     for (auto &It : AccessBins) {
1018       OffsetAndSize ItOAS = It.getFirst();
1019       if (!OAS.mayOverlap(ItOAS))
1020         continue;
1021       for (auto &Access : It.getSecond())
1022         if (!CB(Access, OAS == ItOAS))
1023           return false;
1024     }
1025     return true;
1026   }
1027 
1028 private:
1029   /// State to track fixpoint and validity.
1030   BooleanState BS;
1031 };
1032 
1033 struct AAPointerInfoImpl
1034     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1035   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1036   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1037 
1038   /// See AbstractAttribute::initialize(...).
1039   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1040 
1041   /// See AbstractAttribute::getAsStr().
1042   const std::string getAsStr() const override {
1043     return std::string("PointerInfo ") +
1044            (isValidState() ? (std::string("#") +
1045                               std::to_string(AccessBins.size()) + " bins")
1046                            : "<invalid>");
1047   }
1048 
1049   /// See AbstractAttribute::manifest(...).
1050   ChangeStatus manifest(Attributor &A) override {
1051     return AAPointerInfo::manifest(A);
1052   }
1053 
1054   bool forallInterferingAccesses(
1055       LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1056       const override {
1057     return State::forallInterferingAccesses(LI, CB);
1058   }
1059   bool forallInterferingAccesses(
1060       StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1061       const override {
1062     return State::forallInterferingAccesses(SI, CB);
1063   }
1064 
1065   ChangeStatus translateAndAddCalleeState(Attributor &A,
1066                                           const AAPointerInfo &CalleeAA,
1067                                           int64_t CallArgOffset, CallBase &CB) {
1068     using namespace AA::PointerInfo;
1069     if (!CalleeAA.getState().isValidState() || !isValidState())
1070       return indicatePessimisticFixpoint();
1071 
1072     const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
1073     bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
1074 
1075     // Combine the accesses bin by bin.
1076     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1077     for (auto &It : CalleeImplAA.getState()) {
1078       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1079       if (CallArgOffset != OffsetAndSize::Unknown)
1080         OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
1081                             It.first.getSize());
1082       Accesses &Bin = AccessBins[OAS];
1083       for (const AAPointerInfo::Access &RAcc : It.second) {
1084         if (IsByval && !RAcc.isRead())
1085           continue;
1086         bool UsedAssumedInformation = false;
1087         Optional<Value *> Content = A.translateArgumentToCallSiteContent(
1088             RAcc.getContent(), CB, *this, UsedAssumedInformation);
1089         AccessKind AK =
1090             AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
1091                                                  : AccessKind::AK_READ_WRITE));
1092         Changed =
1093             Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
1094                                 RAcc.getType(), RAcc.getRemoteInst(), &Bin);
1095       }
1096     }
1097     return Changed;
1098   }
1099 
1100   /// Statistic tracking for all AAPointerInfo implementations.
1101   /// See AbstractAttribute::trackStatistics().
1102   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1103 };
1104 
1105 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1106   using AccessKind = AAPointerInfo::AccessKind;
1107   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1108       : AAPointerInfoImpl(IRP, A) {}
1109 
1110   /// See AbstractAttribute::initialize(...).
1111   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1112 
1113   /// Deal with an access and signal if it was handled successfully.
1114   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1115                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1116                     ChangeStatus &Changed, Type *Ty,
1117                     int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
1118     using namespace AA::PointerInfo;
1119     // No need to find a size if one is given or the offset is unknown.
1120     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1121         Ty) {
1122       const DataLayout &DL = A.getDataLayout();
1123       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1124       if (!AccessSize.isScalable())
1125         Size = AccessSize.getFixedSize();
1126     }
1127     Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
1128     return true;
1129   };
1130 
1131   /// Helper struct, will support ranges eventually.
1132   struct OffsetInfo {
1133     int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;
1134 
1135     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1136   };
1137 
1138   /// See AbstractAttribute::updateImpl(...).
1139   ChangeStatus updateImpl(Attributor &A) override {
1140     using namespace AA::PointerInfo;
1141     State S = getState();
1142     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1143     Value &AssociatedValue = getAssociatedValue();
1144 
1145     const DataLayout &DL = A.getDataLayout();
1146     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1147     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1148 
1149     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
1150                                      bool &Follow) {
1151       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1152       UsrOI = PtrOI;
1153       Follow = true;
1154       return true;
1155     };
1156 
1157     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1158       Value *CurPtr = U.get();
1159       User *Usr = U.getUser();
1160       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1161                         << *Usr << "\n");
1162 
1163       OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1164 
1165       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1166         if (CE->isCast())
1167           return HandlePassthroughUser(Usr, PtrOI, Follow);
1168         if (CE->isCompare())
1169           return true;
1170         if (!CE->isGEPWithNoNotionalOverIndexing()) {
1171           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1172                             << "\n");
1173           return false;
1174         }
1175       }
1176       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1177         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1178         UsrOI = PtrOI;
1179 
1180         // TODO: Use range information.
1181         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1182             !GEP->hasAllConstantIndices()) {
1183           UsrOI.Offset = OffsetAndSize::Unknown;
1184           Follow = true;
1185           return true;
1186         }
1187 
1188         SmallVector<Value *, 8> Indices;
1189         for (Use &Idx : llvm::make_range(GEP->idx_begin(), GEP->idx_end())) {
1190           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1191             Indices.push_back(CIdx);
1192             continue;
1193           }
1194 
1195           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1196                             << " : " << *Idx << "\n");
1197           return false;
1198         }
1199         UsrOI.Offset = PtrOI.Offset +
1200                        DL.getIndexedOffsetInType(
1201                            CurPtr->getType()->getPointerElementType(), Indices);
1202         Follow = true;
1203         return true;
1204       }
1205       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1206         return HandlePassthroughUser(Usr, PtrOI, Follow);
1207 
1208       // For PHIs we need to take care of the recurrence explicitly as the value
1209       // might change while we iterate through a loop. For now, we give up if
1210       // the PHI is not invariant.
1211       if (isa<PHINode>(Usr)) {
1212         // Check if the PHI is invariant (so far).
1213         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1214         if (UsrOI == PtrOI)
1215           return true;
1216 
1217         // Check if the PHI operand has already an unknown offset as we can't
1218         // improve on that anymore.
1219         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1220           UsrOI = PtrOI;
1221           Follow = true;
1222           return true;
1223         }
1224 
1225         // Check if the PHI operand is not dependent on the PHI itself.
1226         APInt Offset(DL.getIndexTypeSizeInBits(AssociatedValue.getType()), 0);
1227         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1228                                     DL, Offset, /* AllowNonInbounds */ true)) {
1229           if (Offset != PtrOI.Offset) {
1230             LLVM_DEBUG(dbgs()
1231                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1232                        << *CurPtr << " in " << *Usr << "\n");
1233             return false;
1234           }
1235           return HandlePassthroughUser(Usr, PtrOI, Follow);
1236         }
1237 
1238         // TODO: Approximate in case we know the direction of the recurrence.
1239         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1240                           << *CurPtr << " in " << *Usr << "\n");
1241         UsrOI = PtrOI;
1242         UsrOI.Offset = OffsetAndSize::Unknown;
1243         Follow = true;
1244         return true;
1245       }
1246 
1247       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1248         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1249                             AccessKind::AK_READ, PtrOI.Offset, Changed,
1250                             LoadI->getType());
1251       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1252         if (StoreI->getValueOperand() == CurPtr) {
1253           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1254                             << *StoreI << "\n");
1255           return false;
1256         }
1257         bool UsedAssumedInformation = false;
1258         Optional<Value *> Content = A.getAssumedSimplified(
1259             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1260         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1261                             PtrOI.Offset, Changed,
1262                             StoreI->getValueOperand()->getType());
1263       }
1264       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1265         if (CB->isLifetimeStartOrEnd())
1266           return true;
1267         if (CB->isArgOperand(&U)) {
1268           unsigned ArgNo = CB->getArgOperandNo(&U);
1269           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1270               *this, IRPosition::callsite_argument(*CB, ArgNo),
1271               DepClassTy::REQUIRED);
1272           Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
1273                     Changed;
1274           return true;
1275         }
1276         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1277                           << "\n");
1278         // TODO: Allow some call uses
1279         return false;
1280       }
1281 
1282       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1283       return false;
1284     };
1285     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1286                            /* CheckBBLivenessOnly */ true))
1287       return indicatePessimisticFixpoint();
1288 
1289     LLVM_DEBUG({
1290       dbgs() << "Accesses by bin after update:\n";
1291       for (auto &It : AccessBins) {
1292         dbgs() << "[" << It.first.getOffset() << "-"
1293                << It.first.getOffset() + It.first.getSize()
1294                << "] : " << It.getSecond().size() << "\n";
1295         for (auto &Acc : It.getSecond()) {
1296           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1297                  << "\n";
1298           if (Acc.getLocalInst() != Acc.getRemoteInst())
1299             dbgs() << "     -->                         "
1300                    << *Acc.getRemoteInst() << "\n";
1301           if (!Acc.isWrittenValueYetUndetermined())
1302             dbgs() << "     - " << Acc.getWrittenValue() << "\n";
1303         }
1304       }
1305     });
1306 
1307     return Changed;
1308   }
1309 
1310   /// See AbstractAttribute::trackStatistics()
1311   void trackStatistics() const override {
1312     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1313   }
1314 };
1315 
1316 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1317   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1318       : AAPointerInfoImpl(IRP, A) {}
1319 
1320   /// See AbstractAttribute::updateImpl(...).
1321   ChangeStatus updateImpl(Attributor &A) override {
1322     return indicatePessimisticFixpoint();
1323   }
1324 
1325   /// See AbstractAttribute::trackStatistics()
1326   void trackStatistics() const override {
1327     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1328   }
1329 };
1330 
1331 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1332   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1333       : AAPointerInfoFloating(IRP, A) {}
1334 
1335   /// See AbstractAttribute::initialize(...).
1336   void initialize(Attributor &A) override {
1337     AAPointerInfoFloating::initialize(A);
1338     if (getAnchorScope()->isDeclaration())
1339       indicatePessimisticFixpoint();
1340   }
1341 
1342   /// See AbstractAttribute::trackStatistics()
1343   void trackStatistics() const override {
1344     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1345   }
1346 };
1347 
1348 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1349   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1350       : AAPointerInfoFloating(IRP, A) {}
1351 
1352   /// See AbstractAttribute::updateImpl(...).
1353   ChangeStatus updateImpl(Attributor &A) override {
1354     using namespace AA::PointerInfo;
1355     // We handle memory intrinsics explicitly, at least the first (=
1356     // destination) and second (=source) arguments as we know how they are
1357     // accessed.
1358     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1359       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1360       int64_t LengthVal = OffsetAndSize::Unknown;
1361       if (Length)
1362         LengthVal = Length->getSExtValue();
1363       Value &Ptr = getAssociatedValue();
1364       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1365       ChangeStatus Changed;
1366       if (ArgNo == 0) {
1367         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1368                      nullptr, LengthVal);
1369       } else if (ArgNo == 1) {
1370         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1371                      nullptr, LengthVal);
1372       } else {
1373         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1374                           << *MI << "\n");
1375         return indicatePessimisticFixpoint();
1376       }
1377       return Changed;
1378     }
1379 
1380     // TODO: Once we have call site specific value information we can provide
1381     //       call site specific liveness information and then it makes
1382     //       sense to specialize attributes for call sites arguments instead of
1383     //       redirecting requests to the callee argument.
1384     Argument *Arg = getAssociatedArgument();
1385     if (!Arg)
1386       return indicatePessimisticFixpoint();
1387     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1388     auto &ArgAA =
1389         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1390     return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1391   }
1392 
1393   /// See AbstractAttribute::trackStatistics()
1394   void trackStatistics() const override {
1395     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1396   }
1397 };
1398 
1399 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1400   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1401       : AAPointerInfoFloating(IRP, A) {}
1402 
1403   /// See AbstractAttribute::trackStatistics()
1404   void trackStatistics() const override {
1405     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1406   }
1407 };
1408 
1409 /// -----------------------NoUnwind Function Attribute--------------------------
1410 
1411 struct AANoUnwindImpl : AANoUnwind {
1412   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1413 
1414   const std::string getAsStr() const override {
1415     return getAssumed() ? "nounwind" : "may-unwind";
1416   }
1417 
1418   /// See AbstractAttribute::updateImpl(...).
1419   ChangeStatus updateImpl(Attributor &A) override {
1420     auto Opcodes = {
1421         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1422         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1423         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1424 
1425     auto CheckForNoUnwind = [&](Instruction &I) {
1426       if (!I.mayThrow())
1427         return true;
1428 
1429       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1430         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1431             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1432         return NoUnwindAA.isAssumedNoUnwind();
1433       }
1434       return false;
1435     };
1436 
1437     bool UsedAssumedInformation = false;
1438     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1439                                    UsedAssumedInformation))
1440       return indicatePessimisticFixpoint();
1441 
1442     return ChangeStatus::UNCHANGED;
1443   }
1444 };
1445 
1446 struct AANoUnwindFunction final : public AANoUnwindImpl {
1447   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1448       : AANoUnwindImpl(IRP, A) {}
1449 
1450   /// See AbstractAttribute::trackStatistics()
1451   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1452 };
1453 
1454 /// NoUnwind attribute deduction for a call sites.
1455 struct AANoUnwindCallSite final : AANoUnwindImpl {
1456   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1457       : AANoUnwindImpl(IRP, A) {}
1458 
1459   /// See AbstractAttribute::initialize(...).
1460   void initialize(Attributor &A) override {
1461     AANoUnwindImpl::initialize(A);
1462     Function *F = getAssociatedFunction();
1463     if (!F || F->isDeclaration())
1464       indicatePessimisticFixpoint();
1465   }
1466 
1467   /// See AbstractAttribute::updateImpl(...).
1468   ChangeStatus updateImpl(Attributor &A) override {
1469     // TODO: Once we have call site specific value information we can provide
1470     //       call site specific liveness information and then it makes
1471     //       sense to specialize attributes for call sites arguments instead of
1472     //       redirecting requests to the callee argument.
1473     Function *F = getAssociatedFunction();
1474     const IRPosition &FnPos = IRPosition::function(*F);
1475     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1476     return clampStateAndIndicateChange(getState(), FnAA.getState());
1477   }
1478 
1479   /// See AbstractAttribute::trackStatistics()
1480   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1481 };
1482 
1483 /// --------------------- Function Return Values -------------------------------
1484 
1485 /// "Attribute" that collects all potential returned values and the return
1486 /// instructions that they arise from.
1487 ///
1488 /// If there is a unique returned value R, the manifest method will:
1489 ///   - mark R with the "returned" attribute, if R is an argument.
1490 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1491 
1492   /// Mapping of values potentially returned by the associated function to the
1493   /// return instructions that might return them.
1494   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1495 
1496   /// State flags
1497   ///
1498   ///{
1499   bool IsFixed = false;
1500   bool IsValidState = true;
1501   ///}
1502 
1503 public:
1504   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1505       : AAReturnedValues(IRP, A) {}
1506 
1507   /// See AbstractAttribute::initialize(...).
1508   void initialize(Attributor &A) override {
1509     // Reset the state.
1510     IsFixed = false;
1511     IsValidState = true;
1512     ReturnedValues.clear();
1513 
1514     Function *F = getAssociatedFunction();
1515     if (!F || F->isDeclaration()) {
1516       indicatePessimisticFixpoint();
1517       return;
1518     }
1519     assert(!F->getReturnType()->isVoidTy() &&
1520            "Did not expect a void return type!");
1521 
1522     // The map from instruction opcodes to those instructions in the function.
1523     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1524 
1525     // Look through all arguments, if one is marked as returned we are done.
1526     for (Argument &Arg : F->args()) {
1527       if (Arg.hasReturnedAttr()) {
1528         auto &ReturnInstSet = ReturnedValues[&Arg];
1529         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1530           for (Instruction *RI : *Insts)
1531             ReturnInstSet.insert(cast<ReturnInst>(RI));
1532 
1533         indicateOptimisticFixpoint();
1534         return;
1535       }
1536     }
1537 
1538     if (!A.isFunctionIPOAmendable(*F))
1539       indicatePessimisticFixpoint();
1540   }
1541 
1542   /// See AbstractAttribute::manifest(...).
1543   ChangeStatus manifest(Attributor &A) override;
1544 
1545   /// See AbstractAttribute::getState(...).
1546   AbstractState &getState() override { return *this; }
1547 
1548   /// See AbstractAttribute::getState(...).
1549   const AbstractState &getState() const override { return *this; }
1550 
1551   /// See AbstractAttribute::updateImpl(Attributor &A).
1552   ChangeStatus updateImpl(Attributor &A) override;
1553 
1554   llvm::iterator_range<iterator> returned_values() override {
1555     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1556   }
1557 
1558   llvm::iterator_range<const_iterator> returned_values() const override {
1559     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1560   }
1561 
1562   /// Return the number of potential return values, -1 if unknown.
1563   size_t getNumReturnValues() const override {
1564     return isValidState() ? ReturnedValues.size() : -1;
1565   }
1566 
1567   /// Return an assumed unique return value if a single candidate is found. If
1568   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1569   /// Optional::NoneType.
1570   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1571 
1572   /// See AbstractState::checkForAllReturnedValues(...).
1573   bool checkForAllReturnedValuesAndReturnInsts(
1574       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1575       const override;
1576 
1577   /// Pretty print the attribute similar to the IR representation.
1578   const std::string getAsStr() const override;
1579 
1580   /// See AbstractState::isAtFixpoint().
1581   bool isAtFixpoint() const override { return IsFixed; }
1582 
1583   /// See AbstractState::isValidState().
1584   bool isValidState() const override { return IsValidState; }
1585 
1586   /// See AbstractState::indicateOptimisticFixpoint(...).
1587   ChangeStatus indicateOptimisticFixpoint() override {
1588     IsFixed = true;
1589     return ChangeStatus::UNCHANGED;
1590   }
1591 
1592   ChangeStatus indicatePessimisticFixpoint() override {
1593     IsFixed = true;
1594     IsValidState = false;
1595     return ChangeStatus::CHANGED;
1596   }
1597 };
1598 
1599 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1600   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1601 
1602   // Bookkeeping.
1603   assert(isValidState());
1604   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1605                   "Number of function with known return values");
1606 
1607   // Check if we have an assumed unique return value that we could manifest.
1608   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1609 
1610   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1611     return Changed;
1612 
1613   // Bookkeeping.
1614   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1615                   "Number of function with unique return");
1616   // If the assumed unique return value is an argument, annotate it.
1617   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1618     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1619             getAssociatedFunction()->getReturnType())) {
1620       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1621       Changed = IRAttribute::manifest(A);
1622     }
1623   }
1624   return Changed;
1625 }
1626 
1627 const std::string AAReturnedValuesImpl::getAsStr() const {
1628   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1629          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1630 }
1631 
1632 Optional<Value *>
1633 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1634   // If checkForAllReturnedValues provides a unique value, ignoring potential
1635   // undef values that can also be present, it is assumed to be the actual
1636   // return value and forwarded to the caller of this method. If there are
1637   // multiple, a nullptr is returned indicating there cannot be a unique
1638   // returned value.
1639   Optional<Value *> UniqueRV;
1640   Type *Ty = getAssociatedFunction()->getReturnType();
1641 
1642   auto Pred = [&](Value &RV) -> bool {
1643     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1644     return UniqueRV != Optional<Value *>(nullptr);
1645   };
1646 
1647   if (!A.checkForAllReturnedValues(Pred, *this))
1648     UniqueRV = nullptr;
1649 
1650   return UniqueRV;
1651 }
1652 
1653 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1654     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1655     const {
1656   if (!isValidState())
1657     return false;
1658 
1659   // Check all returned values but ignore call sites as long as we have not
1660   // encountered an overdefined one during an update.
1661   for (auto &It : ReturnedValues) {
1662     Value *RV = It.first;
1663     if (!Pred(*RV, It.second))
1664       return false;
1665   }
1666 
1667   return true;
1668 }
1669 
1670 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1671   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1672 
1673   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1674                            bool) -> bool {
1675     bool UsedAssumedInformation = false;
1676     Optional<Value *> SimpleRetVal =
1677         A.getAssumedSimplified(V, *this, UsedAssumedInformation);
1678     if (!SimpleRetVal.hasValue())
1679       return true;
1680     if (!SimpleRetVal.getValue())
1681       return false;
1682     Value *RetVal = *SimpleRetVal;
1683     assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
1684            "Assumed returned value should be valid in function scope!");
1685     if (ReturnedValues[RetVal].insert(&Ret))
1686       Changed = ChangeStatus::CHANGED;
1687     return true;
1688   };
1689 
1690   auto ReturnInstCB = [&](Instruction &I) {
1691     ReturnInst &Ret = cast<ReturnInst>(I);
1692     return genericValueTraversal<ReturnInst>(
1693         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1694         &I);
1695   };
1696 
1697   // Discover returned values from all live returned instructions in the
1698   // associated function.
1699   bool UsedAssumedInformation = false;
1700   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1701                                  UsedAssumedInformation))
1702     return indicatePessimisticFixpoint();
1703   return Changed;
1704 }
1705 
1706 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1707   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1708       : AAReturnedValuesImpl(IRP, A) {}
1709 
1710   /// See AbstractAttribute::trackStatistics()
1711   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1712 };
1713 
1714 /// Returned values information for a call sites.
1715 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1716   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1717       : AAReturnedValuesImpl(IRP, A) {}
1718 
1719   /// See AbstractAttribute::initialize(...).
1720   void initialize(Attributor &A) override {
1721     // TODO: Once we have call site specific value information we can provide
1722     //       call site specific liveness information and then it makes
1723     //       sense to specialize attributes for call sites instead of
1724     //       redirecting requests to the callee.
1725     llvm_unreachable("Abstract attributes for returned values are not "
1726                      "supported for call sites yet!");
1727   }
1728 
1729   /// See AbstractAttribute::updateImpl(...).
1730   ChangeStatus updateImpl(Attributor &A) override {
1731     return indicatePessimisticFixpoint();
1732   }
1733 
1734   /// See AbstractAttribute::trackStatistics()
1735   void trackStatistics() const override {}
1736 };
1737 
1738 /// ------------------------ NoSync Function Attribute -------------------------
1739 
1740 struct AANoSyncImpl : AANoSync {
1741   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1742 
1743   const std::string getAsStr() const override {
1744     return getAssumed() ? "nosync" : "may-sync";
1745   }
1746 
1747   /// See AbstractAttribute::updateImpl(...).
1748   ChangeStatus updateImpl(Attributor &A) override;
1749 
1750   /// Helper function used to determine whether an instruction is non-relaxed
1751   /// atomic. In other words, if an atomic instruction does not have unordered
1752   /// or monotonic ordering
1753   static bool isNonRelaxedAtomic(Instruction *I);
1754 
1755   /// Helper function specific for intrinsics which are potentially volatile
1756   static bool isNoSyncIntrinsic(Instruction *I);
1757 };
1758 
1759 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1760   if (!I->isAtomic())
1761     return false;
1762 
1763   if (auto *FI = dyn_cast<FenceInst>(I))
1764     // All legal orderings for fence are stronger than monotonic.
1765     return FI->getSyncScopeID() != SyncScope::SingleThread;
1766   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1767     // Unordered is not a legal ordering for cmpxchg.
1768     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1769             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1770   }
1771 
1772   AtomicOrdering Ordering;
1773   switch (I->getOpcode()) {
1774   case Instruction::AtomicRMW:
1775     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1776     break;
1777   case Instruction::Store:
1778     Ordering = cast<StoreInst>(I)->getOrdering();
1779     break;
1780   case Instruction::Load:
1781     Ordering = cast<LoadInst>(I)->getOrdering();
1782     break;
1783   default:
1784     llvm_unreachable(
1785         "New atomic operations need to be known in the attributor.");
1786   }
1787 
1788   return (Ordering != AtomicOrdering::Unordered &&
1789           Ordering != AtomicOrdering::Monotonic);
1790 }
1791 
1792 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1793 /// which would be nosync except that they have a volatile flag.  All other
1794 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1795 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1796   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1797     return !MI->isVolatile();
1798   return false;
1799 }
1800 
1801 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1802 
1803   auto CheckRWInstForNoSync = [&](Instruction &I) {
1804     /// We are looking for volatile instructions or Non-Relaxed atomics.
1805 
1806     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1807       if (CB->hasFnAttr(Attribute::NoSync))
1808         return true;
1809 
1810       if (isNoSyncIntrinsic(&I))
1811         return true;
1812 
1813       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1814           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1815       return NoSyncAA.isAssumedNoSync();
1816     }
1817 
1818     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1819       return true;
1820 
1821     return false;
1822   };
1823 
1824   auto CheckForNoSync = [&](Instruction &I) {
1825     // At this point we handled all read/write effects and they are all
1826     // nosync, so they can be skipped.
1827     if (I.mayReadOrWriteMemory())
1828       return true;
1829 
1830     // non-convergent and readnone imply nosync.
1831     return !cast<CallBase>(I).isConvergent();
1832   };
1833 
1834   bool UsedAssumedInformation = false;
1835   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1836                                           UsedAssumedInformation) ||
1837       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1838                                          UsedAssumedInformation))
1839     return indicatePessimisticFixpoint();
1840 
1841   return ChangeStatus::UNCHANGED;
1842 }
1843 
1844 struct AANoSyncFunction final : public AANoSyncImpl {
1845   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1846       : AANoSyncImpl(IRP, A) {}
1847 
1848   /// See AbstractAttribute::trackStatistics()
1849   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1850 };
1851 
1852 /// NoSync attribute deduction for a call sites.
1853 struct AANoSyncCallSite final : AANoSyncImpl {
1854   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1855       : AANoSyncImpl(IRP, A) {}
1856 
1857   /// See AbstractAttribute::initialize(...).
1858   void initialize(Attributor &A) override {
1859     AANoSyncImpl::initialize(A);
1860     Function *F = getAssociatedFunction();
1861     if (!F || F->isDeclaration())
1862       indicatePessimisticFixpoint();
1863   }
1864 
1865   /// See AbstractAttribute::updateImpl(...).
1866   ChangeStatus updateImpl(Attributor &A) override {
1867     // TODO: Once we have call site specific value information we can provide
1868     //       call site specific liveness information and then it makes
1869     //       sense to specialize attributes for call sites arguments instead of
1870     //       redirecting requests to the callee argument.
1871     Function *F = getAssociatedFunction();
1872     const IRPosition &FnPos = IRPosition::function(*F);
1873     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1874     return clampStateAndIndicateChange(getState(), FnAA.getState());
1875   }
1876 
1877   /// See AbstractAttribute::trackStatistics()
1878   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1879 };
1880 
1881 /// ------------------------ No-Free Attributes ----------------------------
1882 
1883 struct AANoFreeImpl : public AANoFree {
1884   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1885 
1886   /// See AbstractAttribute::updateImpl(...).
1887   ChangeStatus updateImpl(Attributor &A) override {
1888     auto CheckForNoFree = [&](Instruction &I) {
1889       const auto &CB = cast<CallBase>(I);
1890       if (CB.hasFnAttr(Attribute::NoFree))
1891         return true;
1892 
1893       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1894           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1895       return NoFreeAA.isAssumedNoFree();
1896     };
1897 
1898     bool UsedAssumedInformation = false;
1899     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1900                                            UsedAssumedInformation))
1901       return indicatePessimisticFixpoint();
1902     return ChangeStatus::UNCHANGED;
1903   }
1904 
1905   /// See AbstractAttribute::getAsStr().
1906   const std::string getAsStr() const override {
1907     return getAssumed() ? "nofree" : "may-free";
1908   }
1909 };
1910 
1911 struct AANoFreeFunction final : public AANoFreeImpl {
1912   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1913       : AANoFreeImpl(IRP, A) {}
1914 
1915   /// See AbstractAttribute::trackStatistics()
1916   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1917 };
1918 
1919 /// NoFree attribute deduction for a call sites.
1920 struct AANoFreeCallSite final : AANoFreeImpl {
1921   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1922       : AANoFreeImpl(IRP, A) {}
1923 
1924   /// See AbstractAttribute::initialize(...).
1925   void initialize(Attributor &A) override {
1926     AANoFreeImpl::initialize(A);
1927     Function *F = getAssociatedFunction();
1928     if (!F || F->isDeclaration())
1929       indicatePessimisticFixpoint();
1930   }
1931 
1932   /// See AbstractAttribute::updateImpl(...).
1933   ChangeStatus updateImpl(Attributor &A) override {
1934     // TODO: Once we have call site specific value information we can provide
1935     //       call site specific liveness information and then it makes
1936     //       sense to specialize attributes for call sites arguments instead of
1937     //       redirecting requests to the callee argument.
1938     Function *F = getAssociatedFunction();
1939     const IRPosition &FnPos = IRPosition::function(*F);
1940     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1941     return clampStateAndIndicateChange(getState(), FnAA.getState());
1942   }
1943 
1944   /// See AbstractAttribute::trackStatistics()
1945   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1946 };
1947 
1948 /// NoFree attribute for floating values.
1949 struct AANoFreeFloating : AANoFreeImpl {
1950   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1951       : AANoFreeImpl(IRP, A) {}
1952 
1953   /// See AbstractAttribute::trackStatistics()
1954   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1955 
1956   /// See Abstract Attribute::updateImpl(...).
1957   ChangeStatus updateImpl(Attributor &A) override {
1958     const IRPosition &IRP = getIRPosition();
1959 
1960     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1961         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1962     if (NoFreeAA.isAssumedNoFree())
1963       return ChangeStatus::UNCHANGED;
1964 
1965     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1966     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1967       Instruction *UserI = cast<Instruction>(U.getUser());
1968       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1969         if (CB->isBundleOperand(&U))
1970           return false;
1971         if (!CB->isArgOperand(&U))
1972           return true;
1973         unsigned ArgNo = CB->getArgOperandNo(&U);
1974 
1975         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1976             *this, IRPosition::callsite_argument(*CB, ArgNo),
1977             DepClassTy::REQUIRED);
1978         return NoFreeArg.isAssumedNoFree();
1979       }
1980 
1981       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1982           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1983         Follow = true;
1984         return true;
1985       }
1986       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
1987           isa<ReturnInst>(UserI))
1988         return true;
1989 
1990       // Unknown user.
1991       return false;
1992     };
1993     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1994       return indicatePessimisticFixpoint();
1995 
1996     return ChangeStatus::UNCHANGED;
1997   }
1998 };
1999 
2000 /// NoFree attribute for a call site argument.
2001 struct AANoFreeArgument final : AANoFreeFloating {
2002   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2003       : AANoFreeFloating(IRP, A) {}
2004 
2005   /// See AbstractAttribute::trackStatistics()
2006   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2007 };
2008 
2009 /// NoFree attribute for call site arguments.
2010 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2011   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2012       : AANoFreeFloating(IRP, A) {}
2013 
2014   /// See AbstractAttribute::updateImpl(...).
2015   ChangeStatus updateImpl(Attributor &A) override {
2016     // TODO: Once we have call site specific value information we can provide
2017     //       call site specific liveness information and then it makes
2018     //       sense to specialize attributes for call sites arguments instead of
2019     //       redirecting requests to the callee argument.
2020     Argument *Arg = getAssociatedArgument();
2021     if (!Arg)
2022       return indicatePessimisticFixpoint();
2023     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2024     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2025     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2026   }
2027 
2028   /// See AbstractAttribute::trackStatistics()
2029   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2030 };
2031 
2032 /// NoFree attribute for function return value.
2033 struct AANoFreeReturned final : AANoFreeFloating {
2034   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2035       : AANoFreeFloating(IRP, A) {
2036     llvm_unreachable("NoFree is not applicable to function returns!");
2037   }
2038 
2039   /// See AbstractAttribute::initialize(...).
2040   void initialize(Attributor &A) override {
2041     llvm_unreachable("NoFree is not applicable to function returns!");
2042   }
2043 
2044   /// See AbstractAttribute::updateImpl(...).
2045   ChangeStatus updateImpl(Attributor &A) override {
2046     llvm_unreachable("NoFree is not applicable to function returns!");
2047   }
2048 
2049   /// See AbstractAttribute::trackStatistics()
2050   void trackStatistics() const override {}
2051 };
2052 
2053 /// NoFree attribute deduction for a call site return value.
2054 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2055   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2056       : AANoFreeFloating(IRP, A) {}
2057 
2058   ChangeStatus manifest(Attributor &A) override {
2059     return ChangeStatus::UNCHANGED;
2060   }
2061   /// See AbstractAttribute::trackStatistics()
2062   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2063 };
2064 
2065 /// ------------------------ NonNull Argument Attribute ------------------------
2066 static int64_t getKnownNonNullAndDerefBytesForUse(
2067     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2068     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2069   TrackUse = false;
2070 
2071   const Value *UseV = U->get();
2072   if (!UseV->getType()->isPointerTy())
2073     return 0;
2074 
2075   // We need to follow common pointer manipulation uses to the accesses they
2076   // feed into. We can try to be smart to avoid looking through things we do not
2077   // like for now, e.g., non-inbounds GEPs.
2078   if (isa<CastInst>(I)) {
2079     TrackUse = true;
2080     return 0;
2081   }
2082 
2083   if (isa<GetElementPtrInst>(I)) {
2084     TrackUse = true;
2085     return 0;
2086   }
2087 
2088   Type *PtrTy = UseV->getType();
2089   const Function *F = I->getFunction();
2090   bool NullPointerIsDefined =
2091       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2092   const DataLayout &DL = A.getInfoCache().getDL();
2093   if (const auto *CB = dyn_cast<CallBase>(I)) {
2094     if (CB->isBundleOperand(U)) {
2095       if (RetainedKnowledge RK = getKnowledgeFromUse(
2096               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2097         IsNonNull |=
2098             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2099         return RK.ArgValue;
2100       }
2101       return 0;
2102     }
2103 
2104     if (CB->isCallee(U)) {
2105       IsNonNull |= !NullPointerIsDefined;
2106       return 0;
2107     }
2108 
2109     unsigned ArgNo = CB->getArgOperandNo(U);
2110     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2111     // As long as we only use known information there is no need to track
2112     // dependences here.
2113     auto &DerefAA =
2114         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2115     IsNonNull |= DerefAA.isKnownNonNull();
2116     return DerefAA.getKnownDereferenceableBytes();
2117   }
2118 
2119   int64_t Offset;
2120   const Value *Base =
2121       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
2122   if (Base) {
2123     if (Base == &AssociatedValue &&
2124         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2125       int64_t DerefBytes =
2126           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2127 
2128       IsNonNull |= !NullPointerIsDefined;
2129       return std::max(int64_t(0), DerefBytes);
2130     }
2131   }
2132 
2133   /// Corner case when an offset is 0.
2134   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
2135                                               /*AllowNonInbounds*/ true);
2136   if (Base) {
2137     if (Offset == 0 && Base == &AssociatedValue &&
2138         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2139       int64_t DerefBytes =
2140           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2141       IsNonNull |= !NullPointerIsDefined;
2142       return std::max(int64_t(0), DerefBytes);
2143     }
2144   }
2145 
2146   return 0;
2147 }
2148 
2149 struct AANonNullImpl : AANonNull {
2150   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2151       : AANonNull(IRP, A),
2152         NullIsDefined(NullPointerIsDefined(
2153             getAnchorScope(),
2154             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2155 
2156   /// See AbstractAttribute::initialize(...).
2157   void initialize(Attributor &A) override {
2158     Value &V = getAssociatedValue();
2159     if (!NullIsDefined &&
2160         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2161                 /* IgnoreSubsumingPositions */ false, &A)) {
2162       indicateOptimisticFixpoint();
2163       return;
2164     }
2165 
2166     if (isa<ConstantPointerNull>(V)) {
2167       indicatePessimisticFixpoint();
2168       return;
2169     }
2170 
2171     AANonNull::initialize(A);
2172 
2173     bool CanBeNull, CanBeFreed;
2174     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2175                                          CanBeFreed)) {
2176       if (!CanBeNull) {
2177         indicateOptimisticFixpoint();
2178         return;
2179       }
2180     }
2181 
2182     if (isa<GlobalValue>(&getAssociatedValue())) {
2183       indicatePessimisticFixpoint();
2184       return;
2185     }
2186 
2187     if (Instruction *CtxI = getCtxI())
2188       followUsesInMBEC(*this, A, getState(), *CtxI);
2189   }
2190 
2191   /// See followUsesInMBEC
2192   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2193                        AANonNull::StateType &State) {
2194     bool IsNonNull = false;
2195     bool TrackUse = false;
2196     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2197                                        IsNonNull, TrackUse);
2198     State.setKnown(IsNonNull);
2199     return TrackUse;
2200   }
2201 
2202   /// See AbstractAttribute::getAsStr().
2203   const std::string getAsStr() const override {
2204     return getAssumed() ? "nonnull" : "may-null";
2205   }
2206 
2207   /// Flag to determine if the underlying value can be null and still allow
2208   /// valid accesses.
2209   const bool NullIsDefined;
2210 };
2211 
2212 /// NonNull attribute for a floating value.
2213 struct AANonNullFloating : public AANonNullImpl {
2214   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2215       : AANonNullImpl(IRP, A) {}
2216 
2217   /// See AbstractAttribute::updateImpl(...).
2218   ChangeStatus updateImpl(Attributor &A) override {
2219     const DataLayout &DL = A.getDataLayout();
2220 
2221     DominatorTree *DT = nullptr;
2222     AssumptionCache *AC = nullptr;
2223     InformationCache &InfoCache = A.getInfoCache();
2224     if (const Function *Fn = getAnchorScope()) {
2225       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2226       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2227     }
2228 
2229     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2230                             AANonNull::StateType &T, bool Stripped) -> bool {
2231       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2232                                              DepClassTy::REQUIRED);
2233       if (!Stripped && this == &AA) {
2234         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2235           T.indicatePessimisticFixpoint();
2236       } else {
2237         // Use abstract attribute information.
2238         const AANonNull::StateType &NS = AA.getState();
2239         T ^= NS;
2240       }
2241       return T.isValidState();
2242     };
2243 
2244     StateType T;
2245     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2246                                           VisitValueCB, getCtxI()))
2247       return indicatePessimisticFixpoint();
2248 
2249     return clampStateAndIndicateChange(getState(), T);
2250   }
2251 
2252   /// See AbstractAttribute::trackStatistics()
2253   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2254 };
2255 
2256 /// NonNull attribute for function return value.
2257 struct AANonNullReturned final
2258     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2259   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2260       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2261 
2262   /// See AbstractAttribute::getAsStr().
2263   const std::string getAsStr() const override {
2264     return getAssumed() ? "nonnull" : "may-null";
2265   }
2266 
2267   /// See AbstractAttribute::trackStatistics()
2268   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2269 };
2270 
2271 /// NonNull attribute for function argument.
2272 struct AANonNullArgument final
2273     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2274   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2275       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2276 
2277   /// See AbstractAttribute::trackStatistics()
2278   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2279 };
2280 
2281 struct AANonNullCallSiteArgument final : AANonNullFloating {
2282   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2283       : AANonNullFloating(IRP, A) {}
2284 
2285   /// See AbstractAttribute::trackStatistics()
2286   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2287 };
2288 
2289 /// NonNull attribute for a call site return position.
2290 struct AANonNullCallSiteReturned final
2291     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2292   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2293       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2294 
2295   /// See AbstractAttribute::trackStatistics()
2296   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2297 };
2298 
2299 /// ------------------------ No-Recurse Attributes ----------------------------
2300 
2301 struct AANoRecurseImpl : public AANoRecurse {
2302   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2303 
2304   /// See AbstractAttribute::getAsStr()
2305   const std::string getAsStr() const override {
2306     return getAssumed() ? "norecurse" : "may-recurse";
2307   }
2308 };
2309 
2310 struct AANoRecurseFunction final : AANoRecurseImpl {
2311   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2312       : AANoRecurseImpl(IRP, A) {}
2313 
2314   /// See AbstractAttribute::initialize(...).
2315   void initialize(Attributor &A) override {
2316     AANoRecurseImpl::initialize(A);
2317     if (const Function *F = getAnchorScope())
2318       if (A.getInfoCache().getSccSize(*F) != 1)
2319         indicatePessimisticFixpoint();
2320   }
2321 
2322   /// See AbstractAttribute::updateImpl(...).
2323   ChangeStatus updateImpl(Attributor &A) override {
2324 
2325     // If all live call sites are known to be no-recurse, we are as well.
2326     auto CallSitePred = [&](AbstractCallSite ACS) {
2327       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2328           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2329           DepClassTy::NONE);
2330       return NoRecurseAA.isKnownNoRecurse();
2331     };
2332     bool AllCallSitesKnown;
2333     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2334       // If we know all call sites and all are known no-recurse, we are done.
2335       // If all known call sites, which might not be all that exist, are known
2336       // to be no-recurse, we are not done but we can continue to assume
2337       // no-recurse. If one of the call sites we have not visited will become
2338       // live, another update is triggered.
2339       if (AllCallSitesKnown)
2340         indicateOptimisticFixpoint();
2341       return ChangeStatus::UNCHANGED;
2342     }
2343 
2344     // If the above check does not hold anymore we look at the calls.
2345     auto CheckForNoRecurse = [&](Instruction &I) {
2346       const auto &CB = cast<CallBase>(I);
2347       if (CB.hasFnAttr(Attribute::NoRecurse))
2348         return true;
2349 
2350       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2351           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2352       if (!NoRecurseAA.isAssumedNoRecurse())
2353         return false;
2354 
2355       // Recursion to the same function
2356       if (CB.getCalledFunction() == getAnchorScope())
2357         return false;
2358 
2359       return true;
2360     };
2361 
2362     bool UsedAssumedInformation = false;
2363     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
2364                                            UsedAssumedInformation))
2365       return indicatePessimisticFixpoint();
2366     return ChangeStatus::UNCHANGED;
2367   }
2368 
2369   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2370 };
2371 
2372 /// NoRecurse attribute deduction for a call sites.
2373 struct AANoRecurseCallSite final : AANoRecurseImpl {
2374   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2375       : AANoRecurseImpl(IRP, A) {}
2376 
2377   /// See AbstractAttribute::initialize(...).
2378   void initialize(Attributor &A) override {
2379     AANoRecurseImpl::initialize(A);
2380     Function *F = getAssociatedFunction();
2381     if (!F || F->isDeclaration())
2382       indicatePessimisticFixpoint();
2383   }
2384 
2385   /// See AbstractAttribute::updateImpl(...).
2386   ChangeStatus updateImpl(Attributor &A) override {
2387     // TODO: Once we have call site specific value information we can provide
2388     //       call site specific liveness information and then it makes
2389     //       sense to specialize attributes for call sites arguments instead of
2390     //       redirecting requests to the callee argument.
2391     Function *F = getAssociatedFunction();
2392     const IRPosition &FnPos = IRPosition::function(*F);
2393     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2394     return clampStateAndIndicateChange(getState(), FnAA.getState());
2395   }
2396 
2397   /// See AbstractAttribute::trackStatistics()
2398   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2399 };
2400 
2401 /// -------------------- Undefined-Behavior Attributes ------------------------
2402 
2403 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2404   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2405       : AAUndefinedBehavior(IRP, A) {}
2406 
2407   /// See AbstractAttribute::updateImpl(...).
2408   // through a pointer (i.e. also branches etc.)
2409   ChangeStatus updateImpl(Attributor &A) override {
2410     const size_t UBPrevSize = KnownUBInsts.size();
2411     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2412 
2413     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2414       // Lang ref now states volatile store is not UB, let's skip them.
2415       if (I.isVolatile() && I.mayWriteToMemory())
2416         return true;
2417 
2418       // Skip instructions that are already saved.
2419       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2420         return true;
2421 
2422       // If we reach here, we know we have an instruction
2423       // that accesses memory through a pointer operand,
2424       // for which getPointerOperand() should give it to us.
2425       Value *PtrOp =
2426           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2427       assert(PtrOp &&
2428              "Expected pointer operand of memory accessing instruction");
2429 
2430       // Either we stopped and the appropriate action was taken,
2431       // or we got back a simplified value to continue.
2432       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2433       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2434         return true;
2435       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2436 
2437       // A memory access through a pointer is considered UB
2438       // only if the pointer has constant null value.
2439       // TODO: Expand it to not only check constant values.
2440       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2441         AssumedNoUBInsts.insert(&I);
2442         return true;
2443       }
2444       const Type *PtrTy = PtrOpVal->getType();
2445 
2446       // Because we only consider instructions inside functions,
2447       // assume that a parent function exists.
2448       const Function *F = I.getFunction();
2449 
2450       // A memory access using constant null pointer is only considered UB
2451       // if null pointer is _not_ defined for the target platform.
2452       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2453         AssumedNoUBInsts.insert(&I);
2454       else
2455         KnownUBInsts.insert(&I);
2456       return true;
2457     };
2458 
2459     auto InspectBrInstForUB = [&](Instruction &I) {
2460       // A conditional branch instruction is considered UB if it has `undef`
2461       // condition.
2462 
2463       // Skip instructions that are already saved.
2464       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2465         return true;
2466 
2467       // We know we have a branch instruction.
2468       auto *BrInst = cast<BranchInst>(&I);
2469 
2470       // Unconditional branches are never considered UB.
2471       if (BrInst->isUnconditional())
2472         return true;
2473 
2474       // Either we stopped and the appropriate action was taken,
2475       // or we got back a simplified value to continue.
2476       Optional<Value *> SimplifiedCond =
2477           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2478       if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2479         return true;
2480       AssumedNoUBInsts.insert(&I);
2481       return true;
2482     };
2483 
2484     auto InspectCallSiteForUB = [&](Instruction &I) {
2485       // Check whether a callsite always cause UB or not
2486 
2487       // Skip instructions that are already saved.
2488       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2489         return true;
2490 
2491       // Check nonnull and noundef argument attribute violation for each
2492       // callsite.
2493       CallBase &CB = cast<CallBase>(I);
2494       Function *Callee = CB.getCalledFunction();
2495       if (!Callee)
2496         return true;
2497       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2498         // If current argument is known to be simplified to null pointer and the
2499         // corresponding argument position is known to have nonnull attribute,
2500         // the argument is poison. Furthermore, if the argument is poison and
2501         // the position is known to have noundef attriubte, this callsite is
2502         // considered UB.
2503         if (idx >= Callee->arg_size())
2504           break;
2505         Value *ArgVal = CB.getArgOperand(idx);
2506         if (!ArgVal)
2507           continue;
2508         // Here, we handle three cases.
2509         //   (1) Not having a value means it is dead. (we can replace the value
2510         //       with undef)
2511         //   (2) Simplified to undef. The argument violate noundef attriubte.
2512         //   (3) Simplified to null pointer where known to be nonnull.
2513         //       The argument is a poison value and violate noundef attribute.
2514         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2515         auto &NoUndefAA =
2516             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2517         if (!NoUndefAA.isKnownNoUndef())
2518           continue;
2519         bool UsedAssumedInformation = false;
2520         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2521             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2522         if (UsedAssumedInformation)
2523           continue;
2524         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2525           return true;
2526         if (!SimplifiedVal.hasValue() ||
2527             isa<UndefValue>(*SimplifiedVal.getValue())) {
2528           KnownUBInsts.insert(&I);
2529           continue;
2530         }
2531         if (!ArgVal->getType()->isPointerTy() ||
2532             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2533           continue;
2534         auto &NonNullAA =
2535             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2536         if (NonNullAA.isKnownNonNull())
2537           KnownUBInsts.insert(&I);
2538       }
2539       return true;
2540     };
2541 
2542     auto InspectReturnInstForUB =
2543         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2544           // Check if a return instruction always cause UB or not
2545           // Note: It is guaranteed that the returned position of the anchor
2546           //       scope has noundef attribute when this is called.
2547           //       We also ensure the return position is not "assumed dead"
2548           //       because the returned value was then potentially simplified to
2549           //       `undef` in AAReturnedValues without removing the `noundef`
2550           //       attribute yet.
2551 
2552           // When the returned position has noundef attriubte, UB occur in the
2553           // following cases.
2554           //   (1) Returned value is known to be undef.
2555           //   (2) The value is known to be a null pointer and the returned
2556           //       position has nonnull attribute (because the returned value is
2557           //       poison).
2558           bool FoundUB = false;
2559           if (isa<UndefValue>(V)) {
2560             FoundUB = true;
2561           } else {
2562             if (isa<ConstantPointerNull>(V)) {
2563               auto &NonNullAA = A.getAAFor<AANonNull>(
2564                   *this, IRPosition::returned(*getAnchorScope()),
2565                   DepClassTy::NONE);
2566               if (NonNullAA.isKnownNonNull())
2567                 FoundUB = true;
2568             }
2569           }
2570 
2571           if (FoundUB)
2572             for (ReturnInst *RI : RetInsts)
2573               KnownUBInsts.insert(RI);
2574           return true;
2575         };
2576 
2577     bool UsedAssumedInformation = false;
2578     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2579                               {Instruction::Load, Instruction::Store,
2580                                Instruction::AtomicCmpXchg,
2581                                Instruction::AtomicRMW},
2582                               UsedAssumedInformation,
2583                               /* CheckBBLivenessOnly */ true);
2584     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2585                               UsedAssumedInformation,
2586                               /* CheckBBLivenessOnly */ true);
2587     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2588                                       UsedAssumedInformation);
2589 
2590     // If the returned position of the anchor scope has noundef attriubte, check
2591     // all returned instructions.
2592     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2593       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2594       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2595         auto &RetPosNoUndefAA =
2596             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2597         if (RetPosNoUndefAA.isKnownNoUndef())
2598           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2599                                                     *this);
2600       }
2601     }
2602 
2603     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2604         UBPrevSize != KnownUBInsts.size())
2605       return ChangeStatus::CHANGED;
2606     return ChangeStatus::UNCHANGED;
2607   }
2608 
2609   bool isKnownToCauseUB(Instruction *I) const override {
2610     return KnownUBInsts.count(I);
2611   }
2612 
2613   bool isAssumedToCauseUB(Instruction *I) const override {
2614     // In simple words, if an instruction is not in the assumed to _not_
2615     // cause UB, then it is assumed UB (that includes those
2616     // in the KnownUBInsts set). The rest is boilerplate
2617     // is to ensure that it is one of the instructions we test
2618     // for UB.
2619 
2620     switch (I->getOpcode()) {
2621     case Instruction::Load:
2622     case Instruction::Store:
2623     case Instruction::AtomicCmpXchg:
2624     case Instruction::AtomicRMW:
2625       return !AssumedNoUBInsts.count(I);
2626     case Instruction::Br: {
2627       auto BrInst = cast<BranchInst>(I);
2628       if (BrInst->isUnconditional())
2629         return false;
2630       return !AssumedNoUBInsts.count(I);
2631     } break;
2632     default:
2633       return false;
2634     }
2635     return false;
2636   }
2637 
2638   ChangeStatus manifest(Attributor &A) override {
2639     if (KnownUBInsts.empty())
2640       return ChangeStatus::UNCHANGED;
2641     for (Instruction *I : KnownUBInsts)
2642       A.changeToUnreachableAfterManifest(I);
2643     return ChangeStatus::CHANGED;
2644   }
2645 
2646   /// See AbstractAttribute::getAsStr()
2647   const std::string getAsStr() const override {
2648     return getAssumed() ? "undefined-behavior" : "no-ub";
2649   }
2650 
2651   /// Note: The correctness of this analysis depends on the fact that the
2652   /// following 2 sets will stop changing after some point.
2653   /// "Change" here means that their size changes.
2654   /// The size of each set is monotonically increasing
2655   /// (we only add items to them) and it is upper bounded by the number of
2656   /// instructions in the processed function (we can never save more
2657   /// elements in either set than this number). Hence, at some point,
2658   /// they will stop increasing.
2659   /// Consequently, at some point, both sets will have stopped
2660   /// changing, effectively making the analysis reach a fixpoint.
2661 
2662   /// Note: These 2 sets are disjoint and an instruction can be considered
2663   /// one of 3 things:
2664   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2665   ///    the KnownUBInsts set.
2666   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2667   ///    has a reason to assume it).
2668   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2669   ///    could not find a reason to assume or prove that it can cause UB,
2670   ///    hence it assumes it doesn't. We have a set for these instructions
2671   ///    so that we don't reprocess them in every update.
2672   ///    Note however that instructions in this set may cause UB.
2673 
2674 protected:
2675   /// A set of all live instructions _known_ to cause UB.
2676   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2677 
2678 private:
2679   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2680   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2681 
2682   // Should be called on updates in which if we're processing an instruction
2683   // \p I that depends on a value \p V, one of the following has to happen:
2684   // - If the value is assumed, then stop.
2685   // - If the value is known but undef, then consider it UB.
2686   // - Otherwise, do specific processing with the simplified value.
2687   // We return None in the first 2 cases to signify that an appropriate
2688   // action was taken and the caller should stop.
2689   // Otherwise, we return the simplified value that the caller should
2690   // use for specific processing.
2691   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2692                                          Instruction *I) {
2693     bool UsedAssumedInformation = false;
2694     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2695         IRPosition::value(*V), *this, UsedAssumedInformation);
2696     if (!UsedAssumedInformation) {
2697       // Don't depend on assumed values.
2698       if (!SimplifiedV.hasValue()) {
2699         // If it is known (which we tested above) but it doesn't have a value,
2700         // then we can assume `undef` and hence the instruction is UB.
2701         KnownUBInsts.insert(I);
2702         return llvm::None;
2703       }
2704       if (!SimplifiedV.getValue())
2705         return nullptr;
2706       V = *SimplifiedV;
2707     }
2708     if (isa<UndefValue>(V)) {
2709       KnownUBInsts.insert(I);
2710       return llvm::None;
2711     }
2712     return V;
2713   }
2714 };
2715 
2716 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2717   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2718       : AAUndefinedBehaviorImpl(IRP, A) {}
2719 
2720   /// See AbstractAttribute::trackStatistics()
2721   void trackStatistics() const override {
2722     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2723                "Number of instructions known to have UB");
2724     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2725         KnownUBInsts.size();
2726   }
2727 };
2728 
2729 /// ------------------------ Will-Return Attributes ----------------------------
2730 
2731 // Helper function that checks whether a function has any cycle which we don't
2732 // know if it is bounded or not.
2733 // Loops with maximum trip count are considered bounded, any other cycle not.
2734 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2735   ScalarEvolution *SE =
2736       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2737   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2738   // If either SCEV or LoopInfo is not available for the function then we assume
2739   // any cycle to be unbounded cycle.
2740   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2741   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2742   if (!SE || !LI) {
2743     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2744       if (SCCI.hasCycle())
2745         return true;
2746     return false;
2747   }
2748 
2749   // If there's irreducible control, the function may contain non-loop cycles.
2750   if (mayContainIrreducibleControl(F, LI))
2751     return true;
2752 
2753   // Any loop that does not have a max trip count is considered unbounded cycle.
2754   for (auto *L : LI->getLoopsInPreorder()) {
2755     if (!SE->getSmallConstantMaxTripCount(L))
2756       return true;
2757   }
2758   return false;
2759 }
2760 
2761 struct AAWillReturnImpl : public AAWillReturn {
2762   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2763       : AAWillReturn(IRP, A) {}
2764 
2765   /// See AbstractAttribute::initialize(...).
2766   void initialize(Attributor &A) override {
2767     AAWillReturn::initialize(A);
2768 
2769     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2770       indicateOptimisticFixpoint();
2771       return;
2772     }
2773   }
2774 
2775   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2776   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2777     // Check for `mustprogress` in the scope and the associated function which
2778     // might be different if this is a call site.
2779     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2780         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2781       return false;
2782 
2783     const auto &MemAA =
2784         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2785     if (!MemAA.isAssumedReadOnly())
2786       return false;
2787     if (KnownOnly && !MemAA.isKnownReadOnly())
2788       return false;
2789     if (!MemAA.isKnownReadOnly())
2790       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2791 
2792     return true;
2793   }
2794 
2795   /// See AbstractAttribute::updateImpl(...).
2796   ChangeStatus updateImpl(Attributor &A) override {
2797     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2798       return ChangeStatus::UNCHANGED;
2799 
2800     auto CheckForWillReturn = [&](Instruction &I) {
2801       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2802       const auto &WillReturnAA =
2803           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2804       if (WillReturnAA.isKnownWillReturn())
2805         return true;
2806       if (!WillReturnAA.isAssumedWillReturn())
2807         return false;
2808       const auto &NoRecurseAA =
2809           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2810       return NoRecurseAA.isAssumedNoRecurse();
2811     };
2812 
2813     bool UsedAssumedInformation = false;
2814     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2815                                            UsedAssumedInformation))
2816       return indicatePessimisticFixpoint();
2817 
2818     return ChangeStatus::UNCHANGED;
2819   }
2820 
2821   /// See AbstractAttribute::getAsStr()
2822   const std::string getAsStr() const override {
2823     return getAssumed() ? "willreturn" : "may-noreturn";
2824   }
2825 };
2826 
2827 struct AAWillReturnFunction final : AAWillReturnImpl {
2828   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2829       : AAWillReturnImpl(IRP, A) {}
2830 
2831   /// See AbstractAttribute::initialize(...).
2832   void initialize(Attributor &A) override {
2833     AAWillReturnImpl::initialize(A);
2834 
2835     Function *F = getAnchorScope();
2836     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2837       indicatePessimisticFixpoint();
2838   }
2839 
2840   /// See AbstractAttribute::trackStatistics()
2841   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2842 };
2843 
2844 /// WillReturn attribute deduction for a call sites.
2845 struct AAWillReturnCallSite final : AAWillReturnImpl {
2846   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2847       : AAWillReturnImpl(IRP, A) {}
2848 
2849   /// See AbstractAttribute::initialize(...).
2850   void initialize(Attributor &A) override {
2851     AAWillReturnImpl::initialize(A);
2852     Function *F = getAssociatedFunction();
2853     if (!F || !A.isFunctionIPOAmendable(*F))
2854       indicatePessimisticFixpoint();
2855   }
2856 
2857   /// See AbstractAttribute::updateImpl(...).
2858   ChangeStatus updateImpl(Attributor &A) override {
2859     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2860       return ChangeStatus::UNCHANGED;
2861 
2862     // TODO: Once we have call site specific value information we can provide
2863     //       call site specific liveness information and then it makes
2864     //       sense to specialize attributes for call sites arguments instead of
2865     //       redirecting requests to the callee argument.
2866     Function *F = getAssociatedFunction();
2867     const IRPosition &FnPos = IRPosition::function(*F);
2868     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2869     return clampStateAndIndicateChange(getState(), FnAA.getState());
2870   }
2871 
2872   /// See AbstractAttribute::trackStatistics()
2873   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2874 };
2875 
2876 /// -------------------AAReachability Attribute--------------------------
2877 
2878 struct AAReachabilityImpl : AAReachability {
2879   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2880       : AAReachability(IRP, A) {}
2881 
2882   const std::string getAsStr() const override {
2883     // TODO: Return the number of reachable queries.
2884     return "reachable";
2885   }
2886 
2887   /// See AbstractAttribute::updateImpl(...).
2888   ChangeStatus updateImpl(Attributor &A) override {
2889     return ChangeStatus::UNCHANGED;
2890   }
2891 };
2892 
2893 struct AAReachabilityFunction final : public AAReachabilityImpl {
2894   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2895       : AAReachabilityImpl(IRP, A) {}
2896 
2897   /// See AbstractAttribute::trackStatistics()
2898   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2899 };
2900 
2901 /// ------------------------ NoAlias Argument Attribute ------------------------
2902 
2903 struct AANoAliasImpl : AANoAlias {
2904   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2905     assert(getAssociatedType()->isPointerTy() &&
2906            "Noalias is a pointer attribute");
2907   }
2908 
2909   const std::string getAsStr() const override {
2910     return getAssumed() ? "noalias" : "may-alias";
2911   }
2912 };
2913 
2914 /// NoAlias attribute for a floating value.
2915 struct AANoAliasFloating final : AANoAliasImpl {
2916   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2917       : AANoAliasImpl(IRP, A) {}
2918 
2919   /// See AbstractAttribute::initialize(...).
2920   void initialize(Attributor &A) override {
2921     AANoAliasImpl::initialize(A);
2922     Value *Val = &getAssociatedValue();
2923     do {
2924       CastInst *CI = dyn_cast<CastInst>(Val);
2925       if (!CI)
2926         break;
2927       Value *Base = CI->getOperand(0);
2928       if (!Base->hasOneUse())
2929         break;
2930       Val = Base;
2931     } while (true);
2932 
2933     if (!Val->getType()->isPointerTy()) {
2934       indicatePessimisticFixpoint();
2935       return;
2936     }
2937 
2938     if (isa<AllocaInst>(Val))
2939       indicateOptimisticFixpoint();
2940     else if (isa<ConstantPointerNull>(Val) &&
2941              !NullPointerIsDefined(getAnchorScope(),
2942                                    Val->getType()->getPointerAddressSpace()))
2943       indicateOptimisticFixpoint();
2944     else if (Val != &getAssociatedValue()) {
2945       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2946           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2947       if (ValNoAliasAA.isKnownNoAlias())
2948         indicateOptimisticFixpoint();
2949     }
2950   }
2951 
2952   /// See AbstractAttribute::updateImpl(...).
2953   ChangeStatus updateImpl(Attributor &A) override {
2954     // TODO: Implement this.
2955     return indicatePessimisticFixpoint();
2956   }
2957 
2958   /// See AbstractAttribute::trackStatistics()
2959   void trackStatistics() const override {
2960     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2961   }
2962 };
2963 
2964 /// NoAlias attribute for an argument.
2965 struct AANoAliasArgument final
2966     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2967   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2968   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2969 
2970   /// See AbstractAttribute::initialize(...).
2971   void initialize(Attributor &A) override {
2972     Base::initialize(A);
2973     // See callsite argument attribute and callee argument attribute.
2974     if (hasAttr({Attribute::ByVal}))
2975       indicateOptimisticFixpoint();
2976   }
2977 
2978   /// See AbstractAttribute::update(...).
2979   ChangeStatus updateImpl(Attributor &A) override {
2980     // We have to make sure no-alias on the argument does not break
2981     // synchronization when this is a callback argument, see also [1] below.
2982     // If synchronization cannot be affected, we delegate to the base updateImpl
2983     // function, otherwise we give up for now.
2984 
2985     // If the function is no-sync, no-alias cannot break synchronization.
2986     const auto &NoSyncAA =
2987         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2988                              DepClassTy::OPTIONAL);
2989     if (NoSyncAA.isAssumedNoSync())
2990       return Base::updateImpl(A);
2991 
2992     // If the argument is read-only, no-alias cannot break synchronization.
2993     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2994         *this, getIRPosition(), DepClassTy::OPTIONAL);
2995     if (MemBehaviorAA.isAssumedReadOnly())
2996       return Base::updateImpl(A);
2997 
2998     // If the argument is never passed through callbacks, no-alias cannot break
2999     // synchronization.
3000     bool AllCallSitesKnown;
3001     if (A.checkForAllCallSites(
3002             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3003             true, AllCallSitesKnown))
3004       return Base::updateImpl(A);
3005 
3006     // TODO: add no-alias but make sure it doesn't break synchronization by
3007     // introducing fake uses. See:
3008     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3009     //     International Workshop on OpenMP 2018,
3010     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3011 
3012     return indicatePessimisticFixpoint();
3013   }
3014 
3015   /// See AbstractAttribute::trackStatistics()
3016   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3017 };
3018 
3019 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3020   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3021       : AANoAliasImpl(IRP, A) {}
3022 
3023   /// See AbstractAttribute::initialize(...).
3024   void initialize(Attributor &A) override {
3025     // See callsite argument attribute and callee argument attribute.
3026     const auto &CB = cast<CallBase>(getAnchorValue());
3027     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3028       indicateOptimisticFixpoint();
3029     Value &Val = getAssociatedValue();
3030     if (isa<ConstantPointerNull>(Val) &&
3031         !NullPointerIsDefined(getAnchorScope(),
3032                               Val.getType()->getPointerAddressSpace()))
3033       indicateOptimisticFixpoint();
3034   }
3035 
3036   /// Determine if the underlying value may alias with the call site argument
3037   /// \p OtherArgNo of \p ICS (= the underlying call site).
3038   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3039                             const AAMemoryBehavior &MemBehaviorAA,
3040                             const CallBase &CB, unsigned OtherArgNo) {
3041     // We do not need to worry about aliasing with the underlying IRP.
3042     if (this->getCalleeArgNo() == (int)OtherArgNo)
3043       return false;
3044 
3045     // If it is not a pointer or pointer vector we do not alias.
3046     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3047     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3048       return false;
3049 
3050     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3051         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3052 
3053     // If the argument is readnone, there is no read-write aliasing.
3054     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3055       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3056       return false;
3057     }
3058 
3059     // If the argument is readonly and the underlying value is readonly, there
3060     // is no read-write aliasing.
3061     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3062     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3063       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3064       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3065       return false;
3066     }
3067 
3068     // We have to utilize actual alias analysis queries so we need the object.
3069     if (!AAR)
3070       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3071 
3072     // Try to rule it out at the call site.
3073     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3074     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3075                          "callsite arguments: "
3076                       << getAssociatedValue() << " " << *ArgOp << " => "
3077                       << (IsAliasing ? "" : "no-") << "alias \n");
3078 
3079     return IsAliasing;
3080   }
3081 
3082   bool
3083   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3084                                          const AAMemoryBehavior &MemBehaviorAA,
3085                                          const AANoAlias &NoAliasAA) {
3086     // We can deduce "noalias" if the following conditions hold.
3087     // (i)   Associated value is assumed to be noalias in the definition.
3088     // (ii)  Associated value is assumed to be no-capture in all the uses
3089     //       possibly executed before this callsite.
3090     // (iii) There is no other pointer argument which could alias with the
3091     //       value.
3092 
3093     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3094     if (!AssociatedValueIsNoAliasAtDef) {
3095       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3096                         << " is not no-alias at the definition\n");
3097       return false;
3098     }
3099 
3100     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3101 
3102     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3103     const Function *ScopeFn = VIRP.getAnchorScope();
3104     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3105     // Check whether the value is captured in the scope using AANoCapture.
3106     //      Look at CFG and check only uses possibly executed before this
3107     //      callsite.
3108     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3109       Instruction *UserI = cast<Instruction>(U.getUser());
3110 
3111       // If UserI is the curr instruction and there is a single potential use of
3112       // the value in UserI we allow the use.
3113       // TODO: We should inspect the operands and allow those that cannot alias
3114       //       with the value.
3115       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3116         return true;
3117 
3118       if (ScopeFn) {
3119         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3120             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3121 
3122         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3123           return true;
3124 
3125         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3126           if (CB->isArgOperand(&U)) {
3127 
3128             unsigned ArgNo = CB->getArgOperandNo(&U);
3129 
3130             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3131                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3132                 DepClassTy::OPTIONAL);
3133 
3134             if (NoCaptureAA.isAssumedNoCapture())
3135               return true;
3136           }
3137         }
3138       }
3139 
3140       // For cases which can potentially have more users
3141       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3142           isa<SelectInst>(U)) {
3143         Follow = true;
3144         return true;
3145       }
3146 
3147       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3148       return false;
3149     };
3150 
3151     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3152       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3153         LLVM_DEBUG(
3154             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3155                    << " cannot be noalias as it is potentially captured\n");
3156         return false;
3157       }
3158     }
3159     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3160 
3161     // Check there is no other pointer argument which could alias with the
3162     // value passed at this call site.
3163     // TODO: AbstractCallSite
3164     const auto &CB = cast<CallBase>(getAnchorValue());
3165     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3166       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3167         return false;
3168 
3169     return true;
3170   }
3171 
3172   /// See AbstractAttribute::updateImpl(...).
3173   ChangeStatus updateImpl(Attributor &A) override {
3174     // If the argument is readnone we are done as there are no accesses via the
3175     // argument.
3176     auto &MemBehaviorAA =
3177         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3178     if (MemBehaviorAA.isAssumedReadNone()) {
3179       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3180       return ChangeStatus::UNCHANGED;
3181     }
3182 
3183     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3184     const auto &NoAliasAA =
3185         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3186 
3187     AAResults *AAR = nullptr;
3188     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3189                                                NoAliasAA)) {
3190       LLVM_DEBUG(
3191           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3192       return ChangeStatus::UNCHANGED;
3193     }
3194 
3195     return indicatePessimisticFixpoint();
3196   }
3197 
3198   /// See AbstractAttribute::trackStatistics()
3199   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3200 };
3201 
3202 /// NoAlias attribute for function return value.
3203 struct AANoAliasReturned final : AANoAliasImpl {
3204   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3205       : AANoAliasImpl(IRP, A) {}
3206 
3207   /// See AbstractAttribute::initialize(...).
3208   void initialize(Attributor &A) override {
3209     AANoAliasImpl::initialize(A);
3210     Function *F = getAssociatedFunction();
3211     if (!F || F->isDeclaration())
3212       indicatePessimisticFixpoint();
3213   }
3214 
3215   /// See AbstractAttribute::updateImpl(...).
3216   virtual ChangeStatus updateImpl(Attributor &A) override {
3217 
3218     auto CheckReturnValue = [&](Value &RV) -> bool {
3219       if (Constant *C = dyn_cast<Constant>(&RV))
3220         if (C->isNullValue() || isa<UndefValue>(C))
3221           return true;
3222 
3223       /// For now, we can only deduce noalias if we have call sites.
3224       /// FIXME: add more support.
3225       if (!isa<CallBase>(&RV))
3226         return false;
3227 
3228       const IRPosition &RVPos = IRPosition::value(RV);
3229       const auto &NoAliasAA =
3230           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3231       if (!NoAliasAA.isAssumedNoAlias())
3232         return false;
3233 
3234       const auto &NoCaptureAA =
3235           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3236       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3237     };
3238 
3239     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3240       return indicatePessimisticFixpoint();
3241 
3242     return ChangeStatus::UNCHANGED;
3243   }
3244 
3245   /// See AbstractAttribute::trackStatistics()
3246   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3247 };
3248 
3249 /// NoAlias attribute deduction for a call site return value.
3250 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3251   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3252       : AANoAliasImpl(IRP, A) {}
3253 
3254   /// See AbstractAttribute::initialize(...).
3255   void initialize(Attributor &A) override {
3256     AANoAliasImpl::initialize(A);
3257     Function *F = getAssociatedFunction();
3258     if (!F || F->isDeclaration())
3259       indicatePessimisticFixpoint();
3260   }
3261 
3262   /// See AbstractAttribute::updateImpl(...).
3263   ChangeStatus updateImpl(Attributor &A) override {
3264     // TODO: Once we have call site specific value information we can provide
3265     //       call site specific liveness information and then it makes
3266     //       sense to specialize attributes for call sites arguments instead of
3267     //       redirecting requests to the callee argument.
3268     Function *F = getAssociatedFunction();
3269     const IRPosition &FnPos = IRPosition::returned(*F);
3270     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3271     return clampStateAndIndicateChange(getState(), FnAA.getState());
3272   }
3273 
3274   /// See AbstractAttribute::trackStatistics()
3275   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3276 };
3277 
3278 /// -------------------AAIsDead Function Attribute-----------------------
3279 
3280 struct AAIsDeadValueImpl : public AAIsDead {
3281   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3282 
3283   /// See AAIsDead::isAssumedDead().
3284   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3285 
3286   /// See AAIsDead::isKnownDead().
3287   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3288 
3289   /// See AAIsDead::isAssumedDead(BasicBlock *).
3290   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3291 
3292   /// See AAIsDead::isKnownDead(BasicBlock *).
3293   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3294 
3295   /// See AAIsDead::isAssumedDead(Instruction *I).
3296   bool isAssumedDead(const Instruction *I) const override {
3297     return I == getCtxI() && isAssumedDead();
3298   }
3299 
3300   /// See AAIsDead::isKnownDead(Instruction *I).
3301   bool isKnownDead(const Instruction *I) const override {
3302     return isAssumedDead(I) && isKnownDead();
3303   }
3304 
3305   /// See AbstractAttribute::getAsStr().
3306   const std::string getAsStr() const override {
3307     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3308   }
3309 
3310   /// Check if all uses are assumed dead.
3311   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3312     // Callers might not check the type, void has no uses.
3313     if (V.getType()->isVoidTy())
3314       return true;
3315 
3316     // If we replace a value with a constant there are no uses left afterwards.
3317     if (!isa<Constant>(V)) {
3318       bool UsedAssumedInformation = false;
3319       Optional<Constant *> C =
3320           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3321       if (!C.hasValue() || *C)
3322         return true;
3323     }
3324 
3325     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3326     // Explicitly set the dependence class to required because we want a long
3327     // chain of N dependent instructions to be considered live as soon as one is
3328     // without going through N update cycles. This is not required for
3329     // correctness.
3330     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3331                              DepClassTy::REQUIRED);
3332   }
3333 
3334   /// Determine if \p I is assumed to be side-effect free.
3335   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3336     if (!I || wouldInstructionBeTriviallyDead(I))
3337       return true;
3338 
3339     auto *CB = dyn_cast<CallBase>(I);
3340     if (!CB || isa<IntrinsicInst>(CB))
3341       return false;
3342 
3343     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3344     const auto &NoUnwindAA =
3345         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3346     if (!NoUnwindAA.isAssumedNoUnwind())
3347       return false;
3348     if (!NoUnwindAA.isKnownNoUnwind())
3349       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3350 
3351     const auto &MemBehaviorAA =
3352         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3353     if (MemBehaviorAA.isAssumedReadOnly()) {
3354       if (!MemBehaviorAA.isKnownReadOnly())
3355         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3356       return true;
3357     }
3358     return false;
3359   }
3360 };
3361 
3362 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3363   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3364       : AAIsDeadValueImpl(IRP, A) {}
3365 
3366   /// See AbstractAttribute::initialize(...).
3367   void initialize(Attributor &A) override {
3368     if (isa<UndefValue>(getAssociatedValue())) {
3369       indicatePessimisticFixpoint();
3370       return;
3371     }
3372 
3373     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3374     if (!isAssumedSideEffectFree(A, I)) {
3375       if (!isa_and_nonnull<StoreInst>(I))
3376         indicatePessimisticFixpoint();
3377       else
3378         removeAssumedBits(HAS_NO_EFFECT);
3379     }
3380   }
3381 
3382   bool isDeadStore(Attributor &A, StoreInst &SI) {
3383     // Lang ref now states volatile store is not UB/dead, let's skip them.
3384     if (SI.isVolatile())
3385       return false;
3386 
3387     bool UsedAssumedInformation = false;
3388     SmallSetVector<Value *, 4> PotentialCopies;
3389     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3390                                              UsedAssumedInformation))
3391       return false;
3392     return llvm::all_of(PotentialCopies, [&](Value *V) {
3393       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3394                              UsedAssumedInformation);
3395     });
3396   }
3397 
3398   /// See AbstractAttribute::updateImpl(...).
3399   ChangeStatus updateImpl(Attributor &A) override {
3400     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3401     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3402       if (!isDeadStore(A, *SI))
3403         return indicatePessimisticFixpoint();
3404     } else {
3405       if (!isAssumedSideEffectFree(A, I))
3406         return indicatePessimisticFixpoint();
3407       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3408         return indicatePessimisticFixpoint();
3409     }
3410     return ChangeStatus::UNCHANGED;
3411   }
3412 
3413   /// See AbstractAttribute::manifest(...).
3414   ChangeStatus manifest(Attributor &A) override {
3415     Value &V = getAssociatedValue();
3416     if (auto *I = dyn_cast<Instruction>(&V)) {
3417       // If we get here we basically know the users are all dead. We check if
3418       // isAssumedSideEffectFree returns true here again because it might not be
3419       // the case and only the users are dead but the instruction (=call) is
3420       // still needed.
3421       if (isa<StoreInst>(I) ||
3422           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3423         A.deleteAfterManifest(*I);
3424         return ChangeStatus::CHANGED;
3425       }
3426     }
3427     if (V.use_empty())
3428       return ChangeStatus::UNCHANGED;
3429 
3430     bool UsedAssumedInformation = false;
3431     Optional<Constant *> C =
3432         A.getAssumedConstant(V, *this, UsedAssumedInformation);
3433     if (C.hasValue() && C.getValue())
3434       return ChangeStatus::UNCHANGED;
3435 
3436     // Replace the value with undef as it is dead but keep droppable uses around
3437     // as they provide information we don't want to give up on just yet.
3438     UndefValue &UV = *UndefValue::get(V.getType());
3439     bool AnyChange =
3440         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
3441     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3442   }
3443 
3444   /// See AbstractAttribute::trackStatistics()
3445   void trackStatistics() const override {
3446     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3447   }
3448 };
3449 
3450 struct AAIsDeadArgument : public AAIsDeadFloating {
3451   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3452       : AAIsDeadFloating(IRP, A) {}
3453 
3454   /// See AbstractAttribute::initialize(...).
3455   void initialize(Attributor &A) override {
3456     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3457       indicatePessimisticFixpoint();
3458   }
3459 
3460   /// See AbstractAttribute::manifest(...).
3461   ChangeStatus manifest(Attributor &A) override {
3462     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3463     Argument &Arg = *getAssociatedArgument();
3464     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3465       if (A.registerFunctionSignatureRewrite(
3466               Arg, /* ReplacementTypes */ {},
3467               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3468               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3469         Arg.dropDroppableUses();
3470         return ChangeStatus::CHANGED;
3471       }
3472     return Changed;
3473   }
3474 
3475   /// See AbstractAttribute::trackStatistics()
3476   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3477 };
3478 
3479 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3480   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3481       : AAIsDeadValueImpl(IRP, A) {}
3482 
3483   /// See AbstractAttribute::initialize(...).
3484   void initialize(Attributor &A) override {
3485     if (isa<UndefValue>(getAssociatedValue()))
3486       indicatePessimisticFixpoint();
3487   }
3488 
3489   /// See AbstractAttribute::updateImpl(...).
3490   ChangeStatus updateImpl(Attributor &A) override {
3491     // TODO: Once we have call site specific value information we can provide
3492     //       call site specific liveness information and then it makes
3493     //       sense to specialize attributes for call sites arguments instead of
3494     //       redirecting requests to the callee argument.
3495     Argument *Arg = getAssociatedArgument();
3496     if (!Arg)
3497       return indicatePessimisticFixpoint();
3498     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3499     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3500     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3501   }
3502 
3503   /// See AbstractAttribute::manifest(...).
3504   ChangeStatus manifest(Attributor &A) override {
3505     CallBase &CB = cast<CallBase>(getAnchorValue());
3506     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3507     assert(!isa<UndefValue>(U.get()) &&
3508            "Expected undef values to be filtered out!");
3509     UndefValue &UV = *UndefValue::get(U->getType());
3510     if (A.changeUseAfterManifest(U, UV))
3511       return ChangeStatus::CHANGED;
3512     return ChangeStatus::UNCHANGED;
3513   }
3514 
3515   /// See AbstractAttribute::trackStatistics()
3516   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3517 };
3518 
3519 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3520   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3521       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3522 
3523   /// See AAIsDead::isAssumedDead().
3524   bool isAssumedDead() const override {
3525     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3526   }
3527 
3528   /// See AbstractAttribute::initialize(...).
3529   void initialize(Attributor &A) override {
3530     if (isa<UndefValue>(getAssociatedValue())) {
3531       indicatePessimisticFixpoint();
3532       return;
3533     }
3534 
3535     // We track this separately as a secondary state.
3536     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3537   }
3538 
3539   /// See AbstractAttribute::updateImpl(...).
3540   ChangeStatus updateImpl(Attributor &A) override {
3541     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3542     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3543       IsAssumedSideEffectFree = false;
3544       Changed = ChangeStatus::CHANGED;
3545     }
3546     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3547       return indicatePessimisticFixpoint();
3548     return Changed;
3549   }
3550 
3551   /// See AbstractAttribute::trackStatistics()
3552   void trackStatistics() const override {
3553     if (IsAssumedSideEffectFree)
3554       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3555     else
3556       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3557   }
3558 
3559   /// See AbstractAttribute::getAsStr().
3560   const std::string getAsStr() const override {
3561     return isAssumedDead()
3562                ? "assumed-dead"
3563                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3564   }
3565 
3566 private:
3567   bool IsAssumedSideEffectFree;
3568 };
3569 
3570 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3571   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3572       : AAIsDeadValueImpl(IRP, A) {}
3573 
3574   /// See AbstractAttribute::updateImpl(...).
3575   ChangeStatus updateImpl(Attributor &A) override {
3576 
3577     bool UsedAssumedInformation = false;
3578     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3579                               {Instruction::Ret}, UsedAssumedInformation);
3580 
3581     auto PredForCallSite = [&](AbstractCallSite ACS) {
3582       if (ACS.isCallbackCall() || !ACS.getInstruction())
3583         return false;
3584       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3585     };
3586 
3587     bool AllCallSitesKnown;
3588     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3589                                 AllCallSitesKnown))
3590       return indicatePessimisticFixpoint();
3591 
3592     return ChangeStatus::UNCHANGED;
3593   }
3594 
3595   /// See AbstractAttribute::manifest(...).
3596   ChangeStatus manifest(Attributor &A) override {
3597     // TODO: Rewrite the signature to return void?
3598     bool AnyChange = false;
3599     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3600     auto RetInstPred = [&](Instruction &I) {
3601       ReturnInst &RI = cast<ReturnInst>(I);
3602       if (!isa<UndefValue>(RI.getReturnValue()))
3603         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3604       return true;
3605     };
3606     bool UsedAssumedInformation = false;
3607     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3608                               UsedAssumedInformation);
3609     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3610   }
3611 
3612   /// See AbstractAttribute::trackStatistics()
3613   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3614 };
3615 
3616 struct AAIsDeadFunction : public AAIsDead {
3617   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3618 
3619   /// See AbstractAttribute::initialize(...).
3620   void initialize(Attributor &A) override {
3621     const Function *F = getAnchorScope();
3622     if (F && !F->isDeclaration()) {
3623       // We only want to compute liveness once. If the function is not part of
3624       // the SCC, skip it.
3625       if (A.isRunOn(*const_cast<Function *>(F))) {
3626         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3627         assumeLive(A, F->getEntryBlock());
3628       } else {
3629         indicatePessimisticFixpoint();
3630       }
3631     }
3632   }
3633 
3634   /// See AbstractAttribute::getAsStr().
3635   const std::string getAsStr() const override {
3636     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3637            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3638            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3639            std::to_string(KnownDeadEnds.size()) + "]";
3640   }
3641 
3642   /// See AbstractAttribute::manifest(...).
3643   ChangeStatus manifest(Attributor &A) override {
3644     assert(getState().isValidState() &&
3645            "Attempted to manifest an invalid state!");
3646 
3647     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3648     Function &F = *getAnchorScope();
3649 
3650     if (AssumedLiveBlocks.empty()) {
3651       A.deleteAfterManifest(F);
3652       return ChangeStatus::CHANGED;
3653     }
3654 
3655     // Flag to determine if we can change an invoke to a call assuming the
3656     // callee is nounwind. This is not possible if the personality of the
3657     // function allows to catch asynchronous exceptions.
3658     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3659 
3660     KnownDeadEnds.set_union(ToBeExploredFrom);
3661     for (const Instruction *DeadEndI : KnownDeadEnds) {
3662       auto *CB = dyn_cast<CallBase>(DeadEndI);
3663       if (!CB)
3664         continue;
3665       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3666           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3667       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3668       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3669         continue;
3670 
3671       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3672         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3673       else
3674         A.changeToUnreachableAfterManifest(
3675             const_cast<Instruction *>(DeadEndI->getNextNode()));
3676       HasChanged = ChangeStatus::CHANGED;
3677     }
3678 
3679     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3680     for (BasicBlock &BB : F)
3681       if (!AssumedLiveBlocks.count(&BB)) {
3682         A.deleteAfterManifest(BB);
3683         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3684       }
3685 
3686     return HasChanged;
3687   }
3688 
3689   /// See AbstractAttribute::updateImpl(...).
3690   ChangeStatus updateImpl(Attributor &A) override;
3691 
3692   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3693     return !AssumedLiveEdges.count(std::make_pair(From, To));
3694   }
3695 
3696   /// See AbstractAttribute::trackStatistics()
3697   void trackStatistics() const override {}
3698 
3699   /// Returns true if the function is assumed dead.
3700   bool isAssumedDead() const override { return false; }
3701 
3702   /// See AAIsDead::isKnownDead().
3703   bool isKnownDead() const override { return false; }
3704 
3705   /// See AAIsDead::isAssumedDead(BasicBlock *).
3706   bool isAssumedDead(const BasicBlock *BB) const override {
3707     assert(BB->getParent() == getAnchorScope() &&
3708            "BB must be in the same anchor scope function.");
3709 
3710     if (!getAssumed())
3711       return false;
3712     return !AssumedLiveBlocks.count(BB);
3713   }
3714 
3715   /// See AAIsDead::isKnownDead(BasicBlock *).
3716   bool isKnownDead(const BasicBlock *BB) const override {
3717     return getKnown() && isAssumedDead(BB);
3718   }
3719 
3720   /// See AAIsDead::isAssumed(Instruction *I).
3721   bool isAssumedDead(const Instruction *I) const override {
3722     assert(I->getParent()->getParent() == getAnchorScope() &&
3723            "Instruction must be in the same anchor scope function.");
3724 
3725     if (!getAssumed())
3726       return false;
3727 
3728     // If it is not in AssumedLiveBlocks then it for sure dead.
3729     // Otherwise, it can still be after noreturn call in a live block.
3730     if (!AssumedLiveBlocks.count(I->getParent()))
3731       return true;
3732 
3733     // If it is not after a liveness barrier it is live.
3734     const Instruction *PrevI = I->getPrevNode();
3735     while (PrevI) {
3736       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3737         return true;
3738       PrevI = PrevI->getPrevNode();
3739     }
3740     return false;
3741   }
3742 
3743   /// See AAIsDead::isKnownDead(Instruction *I).
3744   bool isKnownDead(const Instruction *I) const override {
3745     return getKnown() && isAssumedDead(I);
3746   }
3747 
3748   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3749   /// that internal function called from \p BB should now be looked at.
3750   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3751     if (!AssumedLiveBlocks.insert(&BB).second)
3752       return false;
3753 
3754     // We assume that all of BB is (probably) live now and if there are calls to
3755     // internal functions we will assume that those are now live as well. This
3756     // is a performance optimization for blocks with calls to a lot of internal
3757     // functions. It can however cause dead functions to be treated as live.
3758     for (const Instruction &I : BB)
3759       if (const auto *CB = dyn_cast<CallBase>(&I))
3760         if (const Function *F = CB->getCalledFunction())
3761           if (F->hasLocalLinkage())
3762             A.markLiveInternalFunction(*F);
3763     return true;
3764   }
3765 
3766   /// Collection of instructions that need to be explored again, e.g., we
3767   /// did assume they do not transfer control to (one of their) successors.
3768   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3769 
3770   /// Collection of instructions that are known to not transfer control.
3771   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3772 
3773   /// Collection of all assumed live edges
3774   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3775 
3776   /// Collection of all assumed live BasicBlocks.
3777   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3778 };
3779 
3780 static bool
3781 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3782                         AbstractAttribute &AA,
3783                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3784   const IRPosition &IPos = IRPosition::callsite_function(CB);
3785 
3786   const auto &NoReturnAA =
3787       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3788   if (NoReturnAA.isAssumedNoReturn())
3789     return !NoReturnAA.isKnownNoReturn();
3790   if (CB.isTerminator())
3791     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3792   else
3793     AliveSuccessors.push_back(CB.getNextNode());
3794   return false;
3795 }
3796 
3797 static bool
3798 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3799                         AbstractAttribute &AA,
3800                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3801   bool UsedAssumedInformation =
3802       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3803 
3804   // First, determine if we can change an invoke to a call assuming the
3805   // callee is nounwind. This is not possible if the personality of the
3806   // function allows to catch asynchronous exceptions.
3807   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3808     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3809   } else {
3810     const IRPosition &IPos = IRPosition::callsite_function(II);
3811     const auto &AANoUnw =
3812         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3813     if (AANoUnw.isAssumedNoUnwind()) {
3814       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3815     } else {
3816       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3817     }
3818   }
3819   return UsedAssumedInformation;
3820 }
3821 
3822 static bool
3823 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3824                         AbstractAttribute &AA,
3825                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3826   bool UsedAssumedInformation = false;
3827   if (BI.getNumSuccessors() == 1) {
3828     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3829   } else {
3830     Optional<Constant *> C =
3831         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3832     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3833       // No value yet, assume both edges are dead.
3834     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3835       const BasicBlock *SuccBB =
3836           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3837       AliveSuccessors.push_back(&SuccBB->front());
3838     } else {
3839       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3840       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3841       UsedAssumedInformation = false;
3842     }
3843   }
3844   return UsedAssumedInformation;
3845 }
3846 
3847 static bool
3848 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3849                         AbstractAttribute &AA,
3850                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3851   bool UsedAssumedInformation = false;
3852   Optional<Constant *> C =
3853       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3854   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3855     // No value yet, assume all edges are dead.
3856   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3857     for (auto &CaseIt : SI.cases()) {
3858       if (CaseIt.getCaseValue() == C.getValue()) {
3859         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3860         return UsedAssumedInformation;
3861       }
3862     }
3863     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3864     return UsedAssumedInformation;
3865   } else {
3866     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3867       AliveSuccessors.push_back(&SuccBB->front());
3868   }
3869   return UsedAssumedInformation;
3870 }
3871 
3872 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3873   ChangeStatus Change = ChangeStatus::UNCHANGED;
3874 
3875   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3876                     << getAnchorScope()->size() << "] BBs and "
3877                     << ToBeExploredFrom.size() << " exploration points and "
3878                     << KnownDeadEnds.size() << " known dead ends\n");
3879 
3880   // Copy and clear the list of instructions we need to explore from. It is
3881   // refilled with instructions the next update has to look at.
3882   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3883                                                ToBeExploredFrom.end());
3884   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3885 
3886   SmallVector<const Instruction *, 8> AliveSuccessors;
3887   while (!Worklist.empty()) {
3888     const Instruction *I = Worklist.pop_back_val();
3889     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3890 
3891     // Fast forward for uninteresting instructions. We could look for UB here
3892     // though.
3893     while (!I->isTerminator() && !isa<CallBase>(I))
3894       I = I->getNextNode();
3895 
3896     AliveSuccessors.clear();
3897 
3898     bool UsedAssumedInformation = false;
3899     switch (I->getOpcode()) {
3900     // TODO: look for (assumed) UB to backwards propagate "deadness".
3901     default:
3902       assert(I->isTerminator() &&
3903              "Expected non-terminators to be handled already!");
3904       for (const BasicBlock *SuccBB : successors(I->getParent()))
3905         AliveSuccessors.push_back(&SuccBB->front());
3906       break;
3907     case Instruction::Call:
3908       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3909                                                        *this, AliveSuccessors);
3910       break;
3911     case Instruction::Invoke:
3912       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3913                                                        *this, AliveSuccessors);
3914       break;
3915     case Instruction::Br:
3916       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3917                                                        *this, AliveSuccessors);
3918       break;
3919     case Instruction::Switch:
3920       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3921                                                        *this, AliveSuccessors);
3922       break;
3923     }
3924 
3925     if (UsedAssumedInformation) {
3926       NewToBeExploredFrom.insert(I);
3927     } else if (AliveSuccessors.empty() ||
3928                (I->isTerminator() &&
3929                 AliveSuccessors.size() < I->getNumSuccessors())) {
3930       if (KnownDeadEnds.insert(I))
3931         Change = ChangeStatus::CHANGED;
3932     }
3933 
3934     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3935                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3936                       << UsedAssumedInformation << "\n");
3937 
3938     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3939       if (!I->isTerminator()) {
3940         assert(AliveSuccessors.size() == 1 &&
3941                "Non-terminator expected to have a single successor!");
3942         Worklist.push_back(AliveSuccessor);
3943       } else {
3944         // record the assumed live edge
3945         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
3946         if (AssumedLiveEdges.insert(Edge).second)
3947           Change = ChangeStatus::CHANGED;
3948         if (assumeLive(A, *AliveSuccessor->getParent()))
3949           Worklist.push_back(AliveSuccessor);
3950       }
3951     }
3952   }
3953 
3954   // Check if the content of ToBeExploredFrom changed, ignore the order.
3955   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
3956       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
3957         return !ToBeExploredFrom.count(I);
3958       })) {
3959     Change = ChangeStatus::CHANGED;
3960     ToBeExploredFrom = std::move(NewToBeExploredFrom);
3961   }
3962 
3963   // If we know everything is live there is no need to query for liveness.
3964   // Instead, indicating a pessimistic fixpoint will cause the state to be
3965   // "invalid" and all queries to be answered conservatively without lookups.
3966   // To be in this state we have to (1) finished the exploration and (3) not
3967   // discovered any non-trivial dead end and (2) not ruled unreachable code
3968   // dead.
3969   if (ToBeExploredFrom.empty() &&
3970       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3971       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3972         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3973       }))
3974     return indicatePessimisticFixpoint();
3975   return Change;
3976 }
3977 
3978 /// Liveness information for a call sites.
3979 struct AAIsDeadCallSite final : AAIsDeadFunction {
3980   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3981       : AAIsDeadFunction(IRP, A) {}
3982 
3983   /// See AbstractAttribute::initialize(...).
3984   void initialize(Attributor &A) override {
3985     // TODO: Once we have call site specific value information we can provide
3986     //       call site specific liveness information and then it makes
3987     //       sense to specialize attributes for call sites instead of
3988     //       redirecting requests to the callee.
3989     llvm_unreachable("Abstract attributes for liveness are not "
3990                      "supported for call sites yet!");
3991   }
3992 
3993   /// See AbstractAttribute::updateImpl(...).
3994   ChangeStatus updateImpl(Attributor &A) override {
3995     return indicatePessimisticFixpoint();
3996   }
3997 
3998   /// See AbstractAttribute::trackStatistics()
3999   void trackStatistics() const override {}
4000 };
4001 
4002 /// -------------------- Dereferenceable Argument Attribute --------------------
4003 
4004 struct AADereferenceableImpl : AADereferenceable {
4005   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4006       : AADereferenceable(IRP, A) {}
4007   using StateType = DerefState;
4008 
4009   /// See AbstractAttribute::initialize(...).
4010   void initialize(Attributor &A) override {
4011     SmallVector<Attribute, 4> Attrs;
4012     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4013              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4014     for (const Attribute &Attr : Attrs)
4015       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4016 
4017     const IRPosition &IRP = this->getIRPosition();
4018     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4019 
4020     bool CanBeNull, CanBeFreed;
4021     takeKnownDerefBytesMaximum(
4022         IRP.getAssociatedValue().getPointerDereferenceableBytes(
4023             A.getDataLayout(), CanBeNull, CanBeFreed));
4024 
4025     bool IsFnInterface = IRP.isFnInterfaceKind();
4026     Function *FnScope = IRP.getAnchorScope();
4027     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4028       indicatePessimisticFixpoint();
4029       return;
4030     }
4031 
4032     if (Instruction *CtxI = getCtxI())
4033       followUsesInMBEC(*this, A, getState(), *CtxI);
4034   }
4035 
4036   /// See AbstractAttribute::getState()
4037   /// {
4038   StateType &getState() override { return *this; }
4039   const StateType &getState() const override { return *this; }
4040   /// }
4041 
4042   /// Helper function for collecting accessed bytes in must-be-executed-context
4043   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4044                               DerefState &State) {
4045     const Value *UseV = U->get();
4046     if (!UseV->getType()->isPointerTy())
4047       return;
4048 
4049     Type *PtrTy = UseV->getType();
4050     const DataLayout &DL = A.getDataLayout();
4051     int64_t Offset;
4052     if (const Value *Base = getBasePointerOfAccessPointerOperand(
4053             I, Offset, DL, /*AllowNonInbounds*/ true)) {
4054       if (Base == &getAssociatedValue() &&
4055           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
4056         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
4057         State.addAccessedBytes(Offset, Size);
4058       }
4059     }
4060   }
4061 
4062   /// See followUsesInMBEC
4063   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4064                        AADereferenceable::StateType &State) {
4065     bool IsNonNull = false;
4066     bool TrackUse = false;
4067     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4068         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4069     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4070                       << " for instruction " << *I << "\n");
4071 
4072     addAccessedBytesForUse(A, U, I, State);
4073     State.takeKnownDerefBytesMaximum(DerefBytes);
4074     return TrackUse;
4075   }
4076 
4077   /// See AbstractAttribute::manifest(...).
4078   ChangeStatus manifest(Attributor &A) override {
4079     ChangeStatus Change = AADereferenceable::manifest(A);
4080     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4081       removeAttrs({Attribute::DereferenceableOrNull});
4082       return ChangeStatus::CHANGED;
4083     }
4084     return Change;
4085   }
4086 
4087   void getDeducedAttributes(LLVMContext &Ctx,
4088                             SmallVectorImpl<Attribute> &Attrs) const override {
4089     // TODO: Add *_globally support
4090     if (isAssumedNonNull())
4091       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4092           Ctx, getAssumedDereferenceableBytes()));
4093     else
4094       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4095           Ctx, getAssumedDereferenceableBytes()));
4096   }
4097 
4098   /// See AbstractAttribute::getAsStr().
4099   const std::string getAsStr() const override {
4100     if (!getAssumedDereferenceableBytes())
4101       return "unknown-dereferenceable";
4102     return std::string("dereferenceable") +
4103            (isAssumedNonNull() ? "" : "_or_null") +
4104            (isAssumedGlobal() ? "_globally" : "") + "<" +
4105            std::to_string(getKnownDereferenceableBytes()) + "-" +
4106            std::to_string(getAssumedDereferenceableBytes()) + ">";
4107   }
4108 };
4109 
4110 /// Dereferenceable attribute for a floating value.
4111 struct AADereferenceableFloating : AADereferenceableImpl {
4112   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4113       : AADereferenceableImpl(IRP, A) {}
4114 
4115   /// See AbstractAttribute::updateImpl(...).
4116   ChangeStatus updateImpl(Attributor &A) override {
4117     const DataLayout &DL = A.getDataLayout();
4118 
4119     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4120                             bool Stripped) -> bool {
4121       unsigned IdxWidth =
4122           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4123       APInt Offset(IdxWidth, 0);
4124       const Value *Base =
4125           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4126 
4127       const auto &AA = A.getAAFor<AADereferenceable>(
4128           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4129       int64_t DerefBytes = 0;
4130       if (!Stripped && this == &AA) {
4131         // Use IR information if we did not strip anything.
4132         // TODO: track globally.
4133         bool CanBeNull, CanBeFreed;
4134         DerefBytes =
4135             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4136         T.GlobalState.indicatePessimisticFixpoint();
4137       } else {
4138         const DerefState &DS = AA.getState();
4139         DerefBytes = DS.DerefBytesState.getAssumed();
4140         T.GlobalState &= DS.GlobalState;
4141       }
4142 
4143       // For now we do not try to "increase" dereferenceability due to negative
4144       // indices as we first have to come up with code to deal with loops and
4145       // for overflows of the dereferenceable bytes.
4146       int64_t OffsetSExt = Offset.getSExtValue();
4147       if (OffsetSExt < 0)
4148         OffsetSExt = 0;
4149 
4150       T.takeAssumedDerefBytesMinimum(
4151           std::max(int64_t(0), DerefBytes - OffsetSExt));
4152 
4153       if (this == &AA) {
4154         if (!Stripped) {
4155           // If nothing was stripped IR information is all we got.
4156           T.takeKnownDerefBytesMaximum(
4157               std::max(int64_t(0), DerefBytes - OffsetSExt));
4158           T.indicatePessimisticFixpoint();
4159         } else if (OffsetSExt > 0) {
4160           // If something was stripped but there is circular reasoning we look
4161           // for the offset. If it is positive we basically decrease the
4162           // dereferenceable bytes in a circluar loop now, which will simply
4163           // drive them down to the known value in a very slow way which we
4164           // can accelerate.
4165           T.indicatePessimisticFixpoint();
4166         }
4167       }
4168 
4169       return T.isValidState();
4170     };
4171 
4172     DerefState T;
4173     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4174                                            VisitValueCB, getCtxI()))
4175       return indicatePessimisticFixpoint();
4176 
4177     return clampStateAndIndicateChange(getState(), T);
4178   }
4179 
4180   /// See AbstractAttribute::trackStatistics()
4181   void trackStatistics() const override {
4182     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4183   }
4184 };
4185 
4186 /// Dereferenceable attribute for a return value.
4187 struct AADereferenceableReturned final
4188     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4189   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4190       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4191             IRP, A) {}
4192 
4193   /// See AbstractAttribute::trackStatistics()
4194   void trackStatistics() const override {
4195     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4196   }
4197 };
4198 
4199 /// Dereferenceable attribute for an argument
4200 struct AADereferenceableArgument final
4201     : AAArgumentFromCallSiteArguments<AADereferenceable,
4202                                       AADereferenceableImpl> {
4203   using Base =
4204       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4205   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4206       : Base(IRP, A) {}
4207 
4208   /// See AbstractAttribute::trackStatistics()
4209   void trackStatistics() const override {
4210     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4211   }
4212 };
4213 
4214 /// Dereferenceable attribute for a call site argument.
4215 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4216   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4217       : AADereferenceableFloating(IRP, A) {}
4218 
4219   /// See AbstractAttribute::trackStatistics()
4220   void trackStatistics() const override {
4221     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4222   }
4223 };
4224 
4225 /// Dereferenceable attribute deduction for a call site return value.
4226 struct AADereferenceableCallSiteReturned final
4227     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4228   using Base =
4229       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4230   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4231       : Base(IRP, A) {}
4232 
4233   /// See AbstractAttribute::trackStatistics()
4234   void trackStatistics() const override {
4235     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4236   }
4237 };
4238 
4239 // ------------------------ Align Argument Attribute ------------------------
4240 
4241 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4242                                     Value &AssociatedValue, const Use *U,
4243                                     const Instruction *I, bool &TrackUse) {
4244   // We need to follow common pointer manipulation uses to the accesses they
4245   // feed into.
4246   if (isa<CastInst>(I)) {
4247     // Follow all but ptr2int casts.
4248     TrackUse = !isa<PtrToIntInst>(I);
4249     return 0;
4250   }
4251   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4252     if (GEP->hasAllConstantIndices())
4253       TrackUse = true;
4254     return 0;
4255   }
4256 
4257   MaybeAlign MA;
4258   if (const auto *CB = dyn_cast<CallBase>(I)) {
4259     if (CB->isBundleOperand(U) || CB->isCallee(U))
4260       return 0;
4261 
4262     unsigned ArgNo = CB->getArgOperandNo(U);
4263     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4264     // As long as we only use known information there is no need to track
4265     // dependences here.
4266     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4267     MA = MaybeAlign(AlignAA.getKnownAlign());
4268   }
4269 
4270   const DataLayout &DL = A.getDataLayout();
4271   const Value *UseV = U->get();
4272   if (auto *SI = dyn_cast<StoreInst>(I)) {
4273     if (SI->getPointerOperand() == UseV)
4274       MA = SI->getAlign();
4275   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4276     if (LI->getPointerOperand() == UseV)
4277       MA = LI->getAlign();
4278   }
4279 
4280   if (!MA || *MA <= QueryingAA.getKnownAlign())
4281     return 0;
4282 
4283   unsigned Alignment = MA->value();
4284   int64_t Offset;
4285 
4286   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4287     if (Base == &AssociatedValue) {
4288       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4289       // So we can say that the maximum power of two which is a divisor of
4290       // gcd(Offset, Alignment) is an alignment.
4291 
4292       uint32_t gcd =
4293           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4294       Alignment = llvm::PowerOf2Floor(gcd);
4295     }
4296   }
4297 
4298   return Alignment;
4299 }
4300 
4301 struct AAAlignImpl : AAAlign {
4302   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4303 
4304   /// See AbstractAttribute::initialize(...).
4305   void initialize(Attributor &A) override {
4306     SmallVector<Attribute, 4> Attrs;
4307     getAttrs({Attribute::Alignment}, Attrs);
4308     for (const Attribute &Attr : Attrs)
4309       takeKnownMaximum(Attr.getValueAsInt());
4310 
4311     Value &V = getAssociatedValue();
4312     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
4313     //       use of the function pointer. This was caused by D73131. We want to
4314     //       avoid this for function pointers especially because we iterate
4315     //       their uses and int2ptr is not handled. It is not a correctness
4316     //       problem though!
4317     if (!V.getType()->getPointerElementType()->isFunctionTy())
4318       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4319 
4320     if (getIRPosition().isFnInterfaceKind() &&
4321         (!getAnchorScope() ||
4322          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4323       indicatePessimisticFixpoint();
4324       return;
4325     }
4326 
4327     if (Instruction *CtxI = getCtxI())
4328       followUsesInMBEC(*this, A, getState(), *CtxI);
4329   }
4330 
4331   /// See AbstractAttribute::manifest(...).
4332   ChangeStatus manifest(Attributor &A) override {
4333     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4334 
4335     // Check for users that allow alignment annotations.
4336     Value &AssociatedValue = getAssociatedValue();
4337     for (const Use &U : AssociatedValue.uses()) {
4338       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4339         if (SI->getPointerOperand() == &AssociatedValue)
4340           if (SI->getAlignment() < getAssumedAlign()) {
4341             STATS_DECLTRACK(AAAlign, Store,
4342                             "Number of times alignment added to a store");
4343             SI->setAlignment(Align(getAssumedAlign()));
4344             LoadStoreChanged = ChangeStatus::CHANGED;
4345           }
4346       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4347         if (LI->getPointerOperand() == &AssociatedValue)
4348           if (LI->getAlignment() < getAssumedAlign()) {
4349             LI->setAlignment(Align(getAssumedAlign()));
4350             STATS_DECLTRACK(AAAlign, Load,
4351                             "Number of times alignment added to a load");
4352             LoadStoreChanged = ChangeStatus::CHANGED;
4353           }
4354       }
4355     }
4356 
4357     ChangeStatus Changed = AAAlign::manifest(A);
4358 
4359     Align InheritAlign =
4360         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4361     if (InheritAlign >= getAssumedAlign())
4362       return LoadStoreChanged;
4363     return Changed | LoadStoreChanged;
4364   }
4365 
4366   // TODO: Provide a helper to determine the implied ABI alignment and check in
4367   //       the existing manifest method and a new one for AAAlignImpl that value
4368   //       to avoid making the alignment explicit if it did not improve.
4369 
4370   /// See AbstractAttribute::getDeducedAttributes
4371   virtual void
4372   getDeducedAttributes(LLVMContext &Ctx,
4373                        SmallVectorImpl<Attribute> &Attrs) const override {
4374     if (getAssumedAlign() > 1)
4375       Attrs.emplace_back(
4376           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4377   }
4378 
4379   /// See followUsesInMBEC
4380   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4381                        AAAlign::StateType &State) {
4382     bool TrackUse = false;
4383 
4384     unsigned int KnownAlign =
4385         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4386     State.takeKnownMaximum(KnownAlign);
4387 
4388     return TrackUse;
4389   }
4390 
4391   /// See AbstractAttribute::getAsStr().
4392   const std::string getAsStr() const override {
4393     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4394                                 "-" + std::to_string(getAssumedAlign()) + ">")
4395                              : "unknown-align";
4396   }
4397 };
4398 
4399 /// Align attribute for a floating value.
4400 struct AAAlignFloating : AAAlignImpl {
4401   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4402 
4403   /// See AbstractAttribute::updateImpl(...).
4404   ChangeStatus updateImpl(Attributor &A) override {
4405     const DataLayout &DL = A.getDataLayout();
4406 
4407     auto VisitValueCB = [&](Value &V, const Instruction *,
4408                             AAAlign::StateType &T, bool Stripped) -> bool {
4409       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4410                                            DepClassTy::REQUIRED);
4411       if (!Stripped && this == &AA) {
4412         int64_t Offset;
4413         unsigned Alignment = 1;
4414         if (const Value *Base =
4415                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4416           Align PA = Base->getPointerAlignment(DL);
4417           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4418           // So we can say that the maximum power of two which is a divisor of
4419           // gcd(Offset, Alignment) is an alignment.
4420 
4421           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4422                                                uint32_t(PA.value()));
4423           Alignment = llvm::PowerOf2Floor(gcd);
4424         } else {
4425           Alignment = V.getPointerAlignment(DL).value();
4426         }
4427         // Use only IR information if we did not strip anything.
4428         T.takeKnownMaximum(Alignment);
4429         T.indicatePessimisticFixpoint();
4430       } else {
4431         // Use abstract attribute information.
4432         const AAAlign::StateType &DS = AA.getState();
4433         T ^= DS;
4434       }
4435       return T.isValidState();
4436     };
4437 
4438     StateType T;
4439     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4440                                           VisitValueCB, getCtxI()))
4441       return indicatePessimisticFixpoint();
4442 
4443     // TODO: If we know we visited all incoming values, thus no are assumed
4444     // dead, we can take the known information from the state T.
4445     return clampStateAndIndicateChange(getState(), T);
4446   }
4447 
4448   /// See AbstractAttribute::trackStatistics()
4449   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4450 };
4451 
4452 /// Align attribute for function return value.
4453 struct AAAlignReturned final
4454     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4455   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4456   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4457 
4458   /// See AbstractAttribute::initialize(...).
4459   void initialize(Attributor &A) override {
4460     Base::initialize(A);
4461     Function *F = getAssociatedFunction();
4462     if (!F || F->isDeclaration())
4463       indicatePessimisticFixpoint();
4464   }
4465 
4466   /// See AbstractAttribute::trackStatistics()
4467   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4468 };
4469 
4470 /// Align attribute for function argument.
4471 struct AAAlignArgument final
4472     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4473   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4474   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4475 
4476   /// See AbstractAttribute::manifest(...).
4477   ChangeStatus manifest(Attributor &A) override {
4478     // If the associated argument is involved in a must-tail call we give up
4479     // because we would need to keep the argument alignments of caller and
4480     // callee in-sync. Just does not seem worth the trouble right now.
4481     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4482       return ChangeStatus::UNCHANGED;
4483     return Base::manifest(A);
4484   }
4485 
4486   /// See AbstractAttribute::trackStatistics()
4487   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4488 };
4489 
4490 struct AAAlignCallSiteArgument final : AAAlignFloating {
4491   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4492       : AAAlignFloating(IRP, A) {}
4493 
4494   /// See AbstractAttribute::manifest(...).
4495   ChangeStatus manifest(Attributor &A) override {
4496     // If the associated argument is involved in a must-tail call we give up
4497     // because we would need to keep the argument alignments of caller and
4498     // callee in-sync. Just does not seem worth the trouble right now.
4499     if (Argument *Arg = getAssociatedArgument())
4500       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4501         return ChangeStatus::UNCHANGED;
4502     ChangeStatus Changed = AAAlignImpl::manifest(A);
4503     Align InheritAlign =
4504         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4505     if (InheritAlign >= getAssumedAlign())
4506       Changed = ChangeStatus::UNCHANGED;
4507     return Changed;
4508   }
4509 
4510   /// See AbstractAttribute::updateImpl(Attributor &A).
4511   ChangeStatus updateImpl(Attributor &A) override {
4512     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4513     if (Argument *Arg = getAssociatedArgument()) {
4514       // We only take known information from the argument
4515       // so we do not need to track a dependence.
4516       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4517           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4518       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4519     }
4520     return Changed;
4521   }
4522 
4523   /// See AbstractAttribute::trackStatistics()
4524   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4525 };
4526 
4527 /// Align attribute deduction for a call site return value.
4528 struct AAAlignCallSiteReturned final
4529     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4530   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4531   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4532       : Base(IRP, A) {}
4533 
4534   /// See AbstractAttribute::initialize(...).
4535   void initialize(Attributor &A) override {
4536     Base::initialize(A);
4537     Function *F = getAssociatedFunction();
4538     if (!F || F->isDeclaration())
4539       indicatePessimisticFixpoint();
4540   }
4541 
4542   /// See AbstractAttribute::trackStatistics()
4543   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4544 };
4545 
4546 /// ------------------ Function No-Return Attribute ----------------------------
4547 struct AANoReturnImpl : public AANoReturn {
4548   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4549 
4550   /// See AbstractAttribute::initialize(...).
4551   void initialize(Attributor &A) override {
4552     AANoReturn::initialize(A);
4553     Function *F = getAssociatedFunction();
4554     if (!F || F->isDeclaration())
4555       indicatePessimisticFixpoint();
4556   }
4557 
4558   /// See AbstractAttribute::getAsStr().
4559   const std::string getAsStr() const override {
4560     return getAssumed() ? "noreturn" : "may-return";
4561   }
4562 
4563   /// See AbstractAttribute::updateImpl(Attributor &A).
4564   virtual ChangeStatus updateImpl(Attributor &A) override {
4565     auto CheckForNoReturn = [](Instruction &) { return false; };
4566     bool UsedAssumedInformation = false;
4567     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4568                                    {(unsigned)Instruction::Ret},
4569                                    UsedAssumedInformation))
4570       return indicatePessimisticFixpoint();
4571     return ChangeStatus::UNCHANGED;
4572   }
4573 };
4574 
4575 struct AANoReturnFunction final : AANoReturnImpl {
4576   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4577       : AANoReturnImpl(IRP, A) {}
4578 
4579   /// See AbstractAttribute::trackStatistics()
4580   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4581 };
4582 
4583 /// NoReturn attribute deduction for a call sites.
4584 struct AANoReturnCallSite final : AANoReturnImpl {
4585   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4586       : AANoReturnImpl(IRP, A) {}
4587 
4588   /// See AbstractAttribute::initialize(...).
4589   void initialize(Attributor &A) override {
4590     AANoReturnImpl::initialize(A);
4591     if (Function *F = getAssociatedFunction()) {
4592       const IRPosition &FnPos = IRPosition::function(*F);
4593       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4594       if (!FnAA.isAssumedNoReturn())
4595         indicatePessimisticFixpoint();
4596     }
4597   }
4598 
4599   /// See AbstractAttribute::updateImpl(...).
4600   ChangeStatus updateImpl(Attributor &A) override {
4601     // TODO: Once we have call site specific value information we can provide
4602     //       call site specific liveness information and then it makes
4603     //       sense to specialize attributes for call sites arguments instead of
4604     //       redirecting requests to the callee argument.
4605     Function *F = getAssociatedFunction();
4606     const IRPosition &FnPos = IRPosition::function(*F);
4607     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4608     return clampStateAndIndicateChange(getState(), FnAA.getState());
4609   }
4610 
4611   /// See AbstractAttribute::trackStatistics()
4612   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4613 };
4614 
4615 /// ----------------------- Variable Capturing ---------------------------------
4616 
4617 /// A class to hold the state of for no-capture attributes.
4618 struct AANoCaptureImpl : public AANoCapture {
4619   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4620 
4621   /// See AbstractAttribute::initialize(...).
4622   void initialize(Attributor &A) override {
4623     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4624       indicateOptimisticFixpoint();
4625       return;
4626     }
4627     Function *AnchorScope = getAnchorScope();
4628     if (isFnInterfaceKind() &&
4629         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4630       indicatePessimisticFixpoint();
4631       return;
4632     }
4633 
4634     // You cannot "capture" null in the default address space.
4635     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4636         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4637       indicateOptimisticFixpoint();
4638       return;
4639     }
4640 
4641     const Function *F =
4642         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4643 
4644     // Check what state the associated function can actually capture.
4645     if (F)
4646       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4647     else
4648       indicatePessimisticFixpoint();
4649   }
4650 
4651   /// See AbstractAttribute::updateImpl(...).
4652   ChangeStatus updateImpl(Attributor &A) override;
4653 
4654   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4655   virtual void
4656   getDeducedAttributes(LLVMContext &Ctx,
4657                        SmallVectorImpl<Attribute> &Attrs) const override {
4658     if (!isAssumedNoCaptureMaybeReturned())
4659       return;
4660 
4661     if (isArgumentPosition()) {
4662       if (isAssumedNoCapture())
4663         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4664       else if (ManifestInternal)
4665         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4666     }
4667   }
4668 
4669   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4670   /// depending on the ability of the function associated with \p IRP to capture
4671   /// state in memory and through "returning/throwing", respectively.
4672   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4673                                                    const Function &F,
4674                                                    BitIntegerState &State) {
4675     // TODO: Once we have memory behavior attributes we should use them here.
4676 
4677     // If we know we cannot communicate or write to memory, we do not care about
4678     // ptr2int anymore.
4679     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4680         F.getReturnType()->isVoidTy()) {
4681       State.addKnownBits(NO_CAPTURE);
4682       return;
4683     }
4684 
4685     // A function cannot capture state in memory if it only reads memory, it can
4686     // however return/throw state and the state might be influenced by the
4687     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4688     if (F.onlyReadsMemory())
4689       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4690 
4691     // A function cannot communicate state back if it does not through
4692     // exceptions and doesn not return values.
4693     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4694       State.addKnownBits(NOT_CAPTURED_IN_RET);
4695 
4696     // Check existing "returned" attributes.
4697     int ArgNo = IRP.getCalleeArgNo();
4698     if (F.doesNotThrow() && ArgNo >= 0) {
4699       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4700         if (F.hasParamAttribute(u, Attribute::Returned)) {
4701           if (u == unsigned(ArgNo))
4702             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4703           else if (F.onlyReadsMemory())
4704             State.addKnownBits(NO_CAPTURE);
4705           else
4706             State.addKnownBits(NOT_CAPTURED_IN_RET);
4707           break;
4708         }
4709     }
4710   }
4711 
4712   /// See AbstractState::getAsStr().
4713   const std::string getAsStr() const override {
4714     if (isKnownNoCapture())
4715       return "known not-captured";
4716     if (isAssumedNoCapture())
4717       return "assumed not-captured";
4718     if (isKnownNoCaptureMaybeReturned())
4719       return "known not-captured-maybe-returned";
4720     if (isAssumedNoCaptureMaybeReturned())
4721       return "assumed not-captured-maybe-returned";
4722     return "assumed-captured";
4723   }
4724 };
4725 
4726 /// Attributor-aware capture tracker.
4727 struct AACaptureUseTracker final : public CaptureTracker {
4728 
4729   /// Create a capture tracker that can lookup in-flight abstract attributes
4730   /// through the Attributor \p A.
4731   ///
4732   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4733   /// search is stopped. If a use leads to a return instruction,
4734   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4735   /// If a use leads to a ptr2int which may capture the value,
4736   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4737   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4738   /// set. All values in \p PotentialCopies are later tracked as well. For every
4739   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4740   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4741   /// conservatively set to true.
4742   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4743                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4744                       SmallSetVector<Value *, 4> &PotentialCopies,
4745                       unsigned &RemainingUsesToExplore)
4746       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4747         PotentialCopies(PotentialCopies),
4748         RemainingUsesToExplore(RemainingUsesToExplore) {}
4749 
4750   /// Determine if \p V maybe captured. *Also updates the state!*
4751   bool valueMayBeCaptured(const Value *V) {
4752     if (V->getType()->isPointerTy()) {
4753       PointerMayBeCaptured(V, this);
4754     } else {
4755       State.indicatePessimisticFixpoint();
4756     }
4757     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4758   }
4759 
4760   /// See CaptureTracker::tooManyUses().
4761   void tooManyUses() override {
4762     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4763   }
4764 
4765   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4766     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4767       return true;
4768     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4769         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4770     return DerefAA.getAssumedDereferenceableBytes();
4771   }
4772 
4773   /// See CaptureTracker::captured(...).
4774   bool captured(const Use *U) override {
4775     Instruction *UInst = cast<Instruction>(U->getUser());
4776     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4777                       << "\n");
4778 
4779     // Because we may reuse the tracker multiple times we keep track of the
4780     // number of explored uses ourselves as well.
4781     if (RemainingUsesToExplore-- == 0) {
4782       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4783       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4784                           /* Return */ true);
4785     }
4786 
4787     // Deal with ptr2int by following uses.
4788     if (isa<PtrToIntInst>(UInst)) {
4789       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4790       return valueMayBeCaptured(UInst);
4791     }
4792 
4793     // For stores we check if we can follow the value through memory or not.
4794     if (auto *SI = dyn_cast<StoreInst>(UInst)) {
4795       if (SI->isVolatile())
4796         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4797                             /* Return */ false);
4798       bool UsedAssumedInformation = false;
4799       if (!AA::getPotentialCopiesOfStoredValue(
4800               A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
4801         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4802                             /* Return */ false);
4803       // Not captured directly, potential copies will be checked.
4804       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4805                           /* Return */ false);
4806     }
4807 
4808     // Explicitly catch return instructions.
4809     if (isa<ReturnInst>(UInst)) {
4810       if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
4811         return isCapturedIn(/* Memory */ false, /* Integer */ false,
4812                             /* Return */ true);
4813       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4814                           /* Return */ true);
4815     }
4816 
4817     // For now we only use special logic for call sites. However, the tracker
4818     // itself knows about a lot of other non-capturing cases already.
4819     auto *CB = dyn_cast<CallBase>(UInst);
4820     if (!CB || !CB->isArgOperand(U))
4821       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4822                           /* Return */ true);
4823 
4824     unsigned ArgNo = CB->getArgOperandNo(U);
4825     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4826     // If we have a abstract no-capture attribute for the argument we can use
4827     // it to justify a non-capture attribute here. This allows recursion!
4828     auto &ArgNoCaptureAA =
4829         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4830     if (ArgNoCaptureAA.isAssumedNoCapture())
4831       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4832                           /* Return */ false);
4833     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4834       addPotentialCopy(*CB);
4835       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4836                           /* Return */ false);
4837     }
4838 
4839     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4840     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4841                         /* Return */ true);
4842   }
4843 
4844   /// Register \p CS as potential copy of the value we are checking.
4845   void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }
4846 
4847   /// See CaptureTracker::shouldExplore(...).
4848   bool shouldExplore(const Use *U) override {
4849     // Check liveness and ignore droppable users.
4850     bool UsedAssumedInformation = false;
4851     return !U->getUser()->isDroppable() &&
4852            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
4853                             UsedAssumedInformation);
4854   }
4855 
4856   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4857   /// \p CapturedInRet, then return the appropriate value for use in the
4858   /// CaptureTracker::captured() interface.
4859   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4860                     bool CapturedInRet) {
4861     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4862                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4863     if (CapturedInMem)
4864       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4865     if (CapturedInInt)
4866       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4867     if (CapturedInRet)
4868       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4869     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4870   }
4871 
4872 private:
4873   /// The attributor providing in-flight abstract attributes.
4874   Attributor &A;
4875 
4876   /// The abstract attribute currently updated.
4877   AANoCapture &NoCaptureAA;
4878 
4879   /// The abstract liveness state.
4880   const AAIsDead &IsDeadAA;
4881 
4882   /// The state currently updated.
4883   AANoCapture::StateType &State;
4884 
4885   /// Set of potential copies of the tracked value.
4886   SmallSetVector<Value *, 4> &PotentialCopies;
4887 
4888   /// Global counter to limit the number of explored uses.
4889   unsigned &RemainingUsesToExplore;
4890 };
4891 
4892 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4893   const IRPosition &IRP = getIRPosition();
4894   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4895                                   : &IRP.getAssociatedValue();
4896   if (!V)
4897     return indicatePessimisticFixpoint();
4898 
4899   const Function *F =
4900       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4901   assert(F && "Expected a function!");
4902   const IRPosition &FnPos = IRPosition::function(*F);
4903   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4904 
4905   AANoCapture::StateType T;
4906 
4907   // Readonly means we cannot capture through memory.
4908   const auto &FnMemAA =
4909       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4910   if (FnMemAA.isAssumedReadOnly()) {
4911     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4912     if (FnMemAA.isKnownReadOnly())
4913       addKnownBits(NOT_CAPTURED_IN_MEM);
4914     else
4915       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4916   }
4917 
4918   // Make sure all returned values are different than the underlying value.
4919   // TODO: we could do this in a more sophisticated way inside
4920   //       AAReturnedValues, e.g., track all values that escape through returns
4921   //       directly somehow.
4922   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4923     bool SeenConstant = false;
4924     for (auto &It : RVAA.returned_values()) {
4925       if (isa<Constant>(It.first)) {
4926         if (SeenConstant)
4927           return false;
4928         SeenConstant = true;
4929       } else if (!isa<Argument>(It.first) ||
4930                  It.first == getAssociatedArgument())
4931         return false;
4932     }
4933     return true;
4934   };
4935 
4936   const auto &NoUnwindAA =
4937       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4938   if (NoUnwindAA.isAssumedNoUnwind()) {
4939     bool IsVoidTy = F->getReturnType()->isVoidTy();
4940     const AAReturnedValues *RVAA =
4941         IsVoidTy ? nullptr
4942                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4943 
4944                                                  DepClassTy::OPTIONAL);
4945     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4946       T.addKnownBits(NOT_CAPTURED_IN_RET);
4947       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4948         return ChangeStatus::UNCHANGED;
4949       if (NoUnwindAA.isKnownNoUnwind() &&
4950           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4951         addKnownBits(NOT_CAPTURED_IN_RET);
4952         if (isKnown(NOT_CAPTURED_IN_MEM))
4953           return indicateOptimisticFixpoint();
4954       }
4955     }
4956   }
4957 
4958   // Use the CaptureTracker interface and logic with the specialized tracker,
4959   // defined in AACaptureUseTracker, that can look at in-flight abstract
4960   // attributes and directly updates the assumed state.
4961   SmallSetVector<Value *, 4> PotentialCopies;
4962   unsigned RemainingUsesToExplore =
4963       getDefaultMaxUsesToExploreForCaptureTracking();
4964   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4965                               RemainingUsesToExplore);
4966 
4967   // Check all potential copies of the associated value until we can assume
4968   // none will be captured or we have to assume at least one might be.
4969   unsigned Idx = 0;
4970   PotentialCopies.insert(V);
4971   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4972     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4973 
4974   AANoCapture::StateType &S = getState();
4975   auto Assumed = S.getAssumed();
4976   S.intersectAssumedBits(T.getAssumed());
4977   if (!isAssumedNoCaptureMaybeReturned())
4978     return indicatePessimisticFixpoint();
4979   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4980                                    : ChangeStatus::CHANGED;
4981 }
4982 
4983 /// NoCapture attribute for function arguments.
4984 struct AANoCaptureArgument final : AANoCaptureImpl {
4985   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4986       : AANoCaptureImpl(IRP, A) {}
4987 
4988   /// See AbstractAttribute::trackStatistics()
4989   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4990 };
4991 
4992 /// NoCapture attribute for call site arguments.
4993 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4994   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4995       : AANoCaptureImpl(IRP, A) {}
4996 
4997   /// See AbstractAttribute::initialize(...).
4998   void initialize(Attributor &A) override {
4999     if (Argument *Arg = getAssociatedArgument())
5000       if (Arg->hasByValAttr())
5001         indicateOptimisticFixpoint();
5002     AANoCaptureImpl::initialize(A);
5003   }
5004 
5005   /// See AbstractAttribute::updateImpl(...).
5006   ChangeStatus updateImpl(Attributor &A) override {
5007     // TODO: Once we have call site specific value information we can provide
5008     //       call site specific liveness information and then it makes
5009     //       sense to specialize attributes for call sites arguments instead of
5010     //       redirecting requests to the callee argument.
5011     Argument *Arg = getAssociatedArgument();
5012     if (!Arg)
5013       return indicatePessimisticFixpoint();
5014     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5015     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5016     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5017   }
5018 
5019   /// See AbstractAttribute::trackStatistics()
5020   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5021 };
5022 
5023 /// NoCapture attribute for floating values.
5024 struct AANoCaptureFloating final : AANoCaptureImpl {
5025   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5026       : AANoCaptureImpl(IRP, A) {}
5027 
5028   /// See AbstractAttribute::trackStatistics()
5029   void trackStatistics() const override {
5030     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5031   }
5032 };
5033 
5034 /// NoCapture attribute for function return value.
5035 struct AANoCaptureReturned final : AANoCaptureImpl {
5036   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5037       : AANoCaptureImpl(IRP, A) {
5038     llvm_unreachable("NoCapture is not applicable to function returns!");
5039   }
5040 
5041   /// See AbstractAttribute::initialize(...).
5042   void initialize(Attributor &A) override {
5043     llvm_unreachable("NoCapture is not applicable to function returns!");
5044   }
5045 
5046   /// See AbstractAttribute::updateImpl(...).
5047   ChangeStatus updateImpl(Attributor &A) override {
5048     llvm_unreachable("NoCapture is not applicable to function returns!");
5049   }
5050 
5051   /// See AbstractAttribute::trackStatistics()
5052   void trackStatistics() const override {}
5053 };
5054 
5055 /// NoCapture attribute deduction for a call site return value.
5056 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5057   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5058       : AANoCaptureImpl(IRP, A) {}
5059 
5060   /// See AbstractAttribute::initialize(...).
5061   void initialize(Attributor &A) override {
5062     const Function *F = getAnchorScope();
5063     // Check what state the associated function can actually capture.
5064     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5065   }
5066 
5067   /// See AbstractAttribute::trackStatistics()
5068   void trackStatistics() const override {
5069     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5070   }
5071 };
5072 
5073 /// ------------------ Value Simplify Attribute ----------------------------
5074 
5075 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5076   // FIXME: Add a typecast support.
5077   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5078       SimplifiedAssociatedValue, Other, Ty);
5079   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5080     return false;
5081 
5082   LLVM_DEBUG({
5083     if (SimplifiedAssociatedValue.hasValue())
5084       dbgs() << "[ValueSimplify] is assumed to be "
5085              << **SimplifiedAssociatedValue << "\n";
5086     else
5087       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5088   });
5089   return true;
5090 }
5091 
5092 struct AAValueSimplifyImpl : AAValueSimplify {
5093   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5094       : AAValueSimplify(IRP, A) {}
5095 
5096   /// See AbstractAttribute::initialize(...).
5097   void initialize(Attributor &A) override {
5098     if (getAssociatedValue().getType()->isVoidTy())
5099       indicatePessimisticFixpoint();
5100     if (A.hasSimplificationCallback(getIRPosition()))
5101       indicatePessimisticFixpoint();
5102   }
5103 
5104   /// See AbstractAttribute::getAsStr().
5105   const std::string getAsStr() const override {
5106     LLVM_DEBUG({
5107       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
5108       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5109         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5110     });
5111     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5112                           : "not-simple";
5113   }
5114 
5115   /// See AbstractAttribute::trackStatistics()
5116   void trackStatistics() const override {}
5117 
5118   /// See AAValueSimplify::getAssumedSimplifiedValue()
5119   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5120     return SimplifiedAssociatedValue;
5121   }
5122 
5123   /// Return a value we can use as replacement for the associated one, or
5124   /// nullptr if we don't have one that makes sense.
5125   Value *getReplacementValue(Attributor &A) const {
5126     Value *NewV;
5127     NewV = SimplifiedAssociatedValue.hasValue()
5128                ? SimplifiedAssociatedValue.getValue()
5129                : UndefValue::get(getAssociatedType());
5130     if (!NewV)
5131       return nullptr;
5132     NewV = AA::getWithType(*NewV, *getAssociatedType());
5133     if (!NewV || NewV == &getAssociatedValue())
5134       return nullptr;
5135     const Instruction *CtxI = getCtxI();
5136     if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
5137       return nullptr;
5138     if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
5139       return nullptr;
5140     return NewV;
5141   }
5142 
5143   /// Helper function for querying AAValueSimplify and updating candicate.
5144   /// \param IRP The value position we are trying to unify with SimplifiedValue
5145   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5146                       const IRPosition &IRP, bool Simplify = true) {
5147     bool UsedAssumedInformation = false;
5148     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5149     if (Simplify)
5150       QueryingValueSimplified =
5151           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5152     return unionAssumed(QueryingValueSimplified);
5153   }
5154 
5155   /// Returns a candidate is found or not
5156   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5157     if (!getAssociatedValue().getType()->isIntegerTy())
5158       return false;
5159 
5160     // This will also pass the call base context.
5161     const auto &AA =
5162         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5163 
5164     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5165 
5166     if (!COpt.hasValue()) {
5167       SimplifiedAssociatedValue = llvm::None;
5168       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5169       return true;
5170     }
5171     if (auto *C = COpt.getValue()) {
5172       SimplifiedAssociatedValue = C;
5173       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5174       return true;
5175     }
5176     return false;
5177   }
5178 
5179   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5180     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5181       return true;
5182     if (askSimplifiedValueFor<AAPotentialValues>(A))
5183       return true;
5184     return false;
5185   }
5186 
5187   /// See AbstractAttribute::manifest(...).
5188   ChangeStatus manifest(Attributor &A) override {
5189     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5190     if (getAssociatedValue().user_empty())
5191       return Changed;
5192 
5193     if (auto *NewV = getReplacementValue(A)) {
5194       LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
5195                         << *NewV << " :: " << *this << "\n");
5196       if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
5197         Changed = ChangeStatus::CHANGED;
5198     }
5199 
5200     return Changed | AAValueSimplify::manifest(A);
5201   }
5202 
5203   /// See AbstractState::indicatePessimisticFixpoint(...).
5204   ChangeStatus indicatePessimisticFixpoint() override {
5205     SimplifiedAssociatedValue = &getAssociatedValue();
5206     return AAValueSimplify::indicatePessimisticFixpoint();
5207   }
5208 
5209   static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5210                          LoadInst &L, function_ref<bool(Value &)> Union) {
5211     auto UnionWrapper = [&](Value &V, Value &Obj) {
5212       if (isa<AllocaInst>(Obj))
5213         return Union(V);
5214       if (!AA::isDynamicallyUnique(A, AA, V))
5215         return false;
5216       if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5217         return false;
5218       return Union(V);
5219     };
5220 
5221     Value &Ptr = *L.getPointerOperand();
5222     SmallVector<Value *, 8> Objects;
5223     if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
5224       return false;
5225 
5226     for (Value *Obj : Objects) {
5227       LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5228       if (isa<UndefValue>(Obj))
5229         continue;
5230       if (isa<ConstantPointerNull>(Obj)) {
5231         // A null pointer access can be undefined but any offset from null may
5232         // be OK. We do not try to optimize the latter.
5233         bool UsedAssumedInformation = false;
5234         if (!NullPointerIsDefined(L.getFunction(),
5235                                   Ptr.getType()->getPointerAddressSpace()) &&
5236             A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5237           continue;
5238         return false;
5239       }
5240       if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj))
5241         return false;
5242       Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType());
5243       if (!InitialVal || !Union(*InitialVal))
5244         return false;
5245 
5246       LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5247                            "propagation, checking accesses next.\n");
5248 
5249       auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5250         LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5251         if (!Acc.isWrite())
5252           return true;
5253         if (Acc.isWrittenValueYetUndetermined())
5254           return true;
5255         Value *Content = Acc.getWrittenValue();
5256         if (!Content)
5257           return false;
5258         Value *CastedContent =
5259             AA::getWithType(*Content, *AA.getAssociatedType());
5260         if (!CastedContent)
5261           return false;
5262         if (IsExact)
5263           return UnionWrapper(*CastedContent, *Obj);
5264         if (auto *C = dyn_cast<Constant>(CastedContent))
5265           if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5266             return UnionWrapper(*CastedContent, *Obj);
5267         return false;
5268       };
5269 
5270       auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5271                                            DepClassTy::REQUIRED);
5272       if (!PI.forallInterferingAccesses(L, CheckAccess))
5273         return false;
5274     }
5275     return true;
5276   }
5277 };
5278 
5279 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5280   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5281       : AAValueSimplifyImpl(IRP, A) {}
5282 
5283   void initialize(Attributor &A) override {
5284     AAValueSimplifyImpl::initialize(A);
5285     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5286       indicatePessimisticFixpoint();
5287     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5288                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5289                 /* IgnoreSubsumingPositions */ true))
5290       indicatePessimisticFixpoint();
5291 
5292     // FIXME: This is a hack to prevent us from propagating function poiner in
5293     // the new pass manager CGSCC pass as it creates call edges the
5294     // CallGraphUpdater cannot handle yet.
5295     Value &V = getAssociatedValue();
5296     if (V.getType()->isPointerTy() &&
5297         V.getType()->getPointerElementType()->isFunctionTy() &&
5298         !A.isModulePass())
5299       indicatePessimisticFixpoint();
5300   }
5301 
5302   /// See AbstractAttribute::updateImpl(...).
5303   ChangeStatus updateImpl(Attributor &A) override {
5304     // Byval is only replacable if it is readonly otherwise we would write into
5305     // the replaced value and not the copy that byval creates implicitly.
5306     Argument *Arg = getAssociatedArgument();
5307     if (Arg->hasByValAttr()) {
5308       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5309       //       there is no race by not copying a constant byval.
5310       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
5311                                                        DepClassTy::REQUIRED);
5312       if (!MemAA.isAssumedReadOnly())
5313         return indicatePessimisticFixpoint();
5314     }
5315 
5316     auto Before = SimplifiedAssociatedValue;
5317 
5318     auto PredForCallSite = [&](AbstractCallSite ACS) {
5319       const IRPosition &ACSArgPos =
5320           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5321       // Check if a coresponding argument was found or if it is on not
5322       // associated (which can happen for callback calls).
5323       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5324         return false;
5325 
5326       // Simplify the argument operand explicitly and check if the result is
5327       // valid in the current scope. This avoids refering to simplified values
5328       // in other functions, e.g., we don't want to say a an argument in a
5329       // static function is actually an argument in a different function.
5330       bool UsedAssumedInformation = false;
5331       Optional<Constant *> SimpleArgOp =
5332           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5333       if (!SimpleArgOp.hasValue())
5334         return true;
5335       if (!SimpleArgOp.getValue())
5336         return false;
5337       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5338         return false;
5339       return unionAssumed(*SimpleArgOp);
5340     };
5341 
5342     // Generate a answer specific to a call site context.
5343     bool Success;
5344     bool AllCallSitesKnown;
5345     if (hasCallBaseContext() &&
5346         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5347       Success = PredForCallSite(
5348           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5349     else
5350       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5351                                        AllCallSitesKnown);
5352 
5353     if (!Success)
5354       if (!askSimplifiedValueForOtherAAs(A))
5355         return indicatePessimisticFixpoint();
5356 
5357     // If a candicate was found in this update, return CHANGED.
5358     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5359                                                : ChangeStatus ::CHANGED;
5360   }
5361 
5362   /// See AbstractAttribute::trackStatistics()
5363   void trackStatistics() const override {
5364     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5365   }
5366 };
5367 
5368 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5369   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5370       : AAValueSimplifyImpl(IRP, A) {}
5371 
5372   /// See AAValueSimplify::getAssumedSimplifiedValue()
5373   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5374     if (!isValidState())
5375       return nullptr;
5376     return SimplifiedAssociatedValue;
5377   }
5378 
5379   /// See AbstractAttribute::updateImpl(...).
5380   ChangeStatus updateImpl(Attributor &A) override {
5381     auto Before = SimplifiedAssociatedValue;
5382 
5383     auto PredForReturned = [&](Value &V) {
5384       return checkAndUpdate(A, *this,
5385                             IRPosition::value(V, getCallBaseContext()));
5386     };
5387 
5388     if (!A.checkForAllReturnedValues(PredForReturned, *this))
5389       if (!askSimplifiedValueForOtherAAs(A))
5390         return indicatePessimisticFixpoint();
5391 
5392     // If a candicate was found in this update, return CHANGED.
5393     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5394                                                : ChangeStatus ::CHANGED;
5395   }
5396 
5397   ChangeStatus manifest(Attributor &A) override {
5398     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5399 
5400     if (auto *NewV = getReplacementValue(A)) {
5401       auto PredForReturned =
5402           [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5403             for (ReturnInst *RI : RetInsts) {
5404               Value *ReturnedVal = RI->getReturnValue();
5405               if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
5406                 return true;
5407               assert(RI->getFunction() == getAnchorScope() &&
5408                      "ReturnInst in wrong function!");
5409               LLVM_DEBUG(dbgs()
5410                          << "[ValueSimplify] " << *ReturnedVal << " -> "
5411                          << *NewV << " in " << *RI << " :: " << *this << "\n");
5412               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
5413                 Changed = ChangeStatus::CHANGED;
5414             }
5415             return true;
5416           };
5417       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
5418     }
5419 
5420     return Changed | AAValueSimplify::manifest(A);
5421   }
5422 
5423   /// See AbstractAttribute::trackStatistics()
5424   void trackStatistics() const override {
5425     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5426   }
5427 };
5428 
5429 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5430   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5431       : AAValueSimplifyImpl(IRP, A) {}
5432 
5433   /// See AbstractAttribute::initialize(...).
5434   void initialize(Attributor &A) override {
5435     AAValueSimplifyImpl::initialize(A);
5436     Value &V = getAnchorValue();
5437 
5438     // TODO: add other stuffs
5439     if (isa<Constant>(V))
5440       indicatePessimisticFixpoint();
5441   }
5442 
5443   /// Check if \p Cmp is a comparison we can simplify.
5444   ///
5445   /// We handle multiple cases, one in which at least one operand is an
5446   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5447   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5448   /// will be updated.
5449   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5450     auto Union = [&](Value &V) {
5451       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5452           SimplifiedAssociatedValue, &V, V.getType());
5453       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5454     };
5455 
5456     Value *LHS = Cmp.getOperand(0);
5457     Value *RHS = Cmp.getOperand(1);
5458 
5459     // Simplify the operands first.
5460     bool UsedAssumedInformation = false;
5461     const auto &SimplifiedLHS =
5462         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5463                                *this, UsedAssumedInformation);
5464     if (!SimplifiedLHS.hasValue())
5465       return true;
5466     if (!SimplifiedLHS.getValue())
5467       return false;
5468     LHS = *SimplifiedLHS;
5469 
5470     const auto &SimplifiedRHS =
5471         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5472                                *this, UsedAssumedInformation);
5473     if (!SimplifiedRHS.hasValue())
5474       return true;
5475     if (!SimplifiedRHS.getValue())
5476       return false;
5477     RHS = *SimplifiedRHS;
5478 
5479     LLVMContext &Ctx = Cmp.getContext();
5480     // Handle the trivial case first in which we don't even need to think about
5481     // null or non-null.
5482     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5483       Constant *NewVal =
5484           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5485       if (!Union(*NewVal))
5486         return false;
5487       if (!UsedAssumedInformation)
5488         indicateOptimisticFixpoint();
5489       return true;
5490     }
5491 
5492     // From now on we only handle equalities (==, !=).
5493     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5494     if (!ICmp || !ICmp->isEquality())
5495       return false;
5496 
5497     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5498     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5499     if (!LHSIsNull && !RHSIsNull)
5500       return false;
5501 
5502     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5503     // non-nullptr operand and if we assume it's non-null we can conclude the
5504     // result of the comparison.
5505     assert((LHSIsNull || RHSIsNull) &&
5506            "Expected nullptr versus non-nullptr comparison at this point");
5507 
5508     // The index is the operand that we assume is not null.
5509     unsigned PtrIdx = LHSIsNull;
5510     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5511         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5512         DepClassTy::REQUIRED);
5513     if (!PtrNonNullAA.isAssumedNonNull())
5514       return false;
5515     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5516 
5517     // The new value depends on the predicate, true for != and false for ==.
5518     Constant *NewVal = ConstantInt::get(
5519         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5520     if (!Union(*NewVal))
5521       return false;
5522 
5523     if (!UsedAssumedInformation)
5524       indicateOptimisticFixpoint();
5525 
5526     return true;
5527   }
5528 
5529   bool updateWithLoad(Attributor &A, LoadInst &L) {
5530     auto Union = [&](Value &V) {
5531       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5532           SimplifiedAssociatedValue, &V, L.getType());
5533       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5534     };
5535     return handleLoad(A, *this, L, Union);
5536   }
5537 
5538   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
5539   /// simplify any operand of the instruction \p I. Return true if successful,
5540   /// in that case SimplifiedAssociatedValue will be updated.
5541   bool handleGenericInst(Attributor &A, Instruction &I) {
5542     bool SomeSimplified = false;
5543     bool UsedAssumedInformation = false;
5544 
5545     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5546     int Idx = 0;
5547     for (Value *Op : I.operands()) {
5548       const auto &SimplifiedOp =
5549           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5550                                  *this, UsedAssumedInformation);
5551       // If we are not sure about any operand we are not sure about the entire
5552       // instruction, we'll wait.
5553       if (!SimplifiedOp.hasValue())
5554         return true;
5555 
5556       if (SimplifiedOp.getValue())
5557         NewOps[Idx] = SimplifiedOp.getValue();
5558       else
5559         NewOps[Idx] = Op;
5560 
5561       SomeSimplified |= (NewOps[Idx] != Op);
5562       ++Idx;
5563     }
5564 
5565     // We won't bother with the InstSimplify interface if we didn't simplify any
5566     // operand ourselves.
5567     if (!SomeSimplified)
5568       return false;
5569 
5570     InformationCache &InfoCache = A.getInfoCache();
5571     Function *F = I.getFunction();
5572     const auto *DT =
5573         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5574     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5575     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5576     OptimizationRemarkEmitter *ORE = nullptr;
5577 
5578     const DataLayout &DL = I.getModule()->getDataLayout();
5579     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5580     if (Value *SimplifiedI =
5581             SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5582       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5583           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5584       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5585     }
5586     return false;
5587   }
5588 
5589   /// See AbstractAttribute::updateImpl(...).
5590   ChangeStatus updateImpl(Attributor &A) override {
5591     auto Before = SimplifiedAssociatedValue;
5592 
5593     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5594                             bool Stripped) -> bool {
5595       auto &AA = A.getAAFor<AAValueSimplify>(
5596           *this, IRPosition::value(V, getCallBaseContext()),
5597           DepClassTy::REQUIRED);
5598       if (!Stripped && this == &AA) {
5599 
5600         if (auto *I = dyn_cast<Instruction>(&V)) {
5601           if (auto *LI = dyn_cast<LoadInst>(&V))
5602             if (updateWithLoad(A, *LI))
5603               return true;
5604           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5605             if (handleCmp(A, *Cmp))
5606               return true;
5607           if (handleGenericInst(A, *I))
5608             return true;
5609         }
5610         // TODO: Look the instruction and check recursively.
5611 
5612         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5613                           << "\n");
5614         return false;
5615       }
5616       return checkAndUpdate(A, *this,
5617                             IRPosition::value(V, getCallBaseContext()));
5618     };
5619 
5620     bool Dummy = false;
5621     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5622                                      VisitValueCB, getCtxI(),
5623                                      /* UseValueSimplify */ false))
5624       if (!askSimplifiedValueForOtherAAs(A))
5625         return indicatePessimisticFixpoint();
5626 
5627     // If a candicate was found in this update, return CHANGED.
5628     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5629                                                : ChangeStatus ::CHANGED;
5630   }
5631 
5632   /// See AbstractAttribute::trackStatistics()
5633   void trackStatistics() const override {
5634     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5635   }
5636 };
5637 
5638 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5639   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5640       : AAValueSimplifyImpl(IRP, A) {}
5641 
5642   /// See AbstractAttribute::initialize(...).
5643   void initialize(Attributor &A) override {
5644     SimplifiedAssociatedValue = nullptr;
5645     indicateOptimisticFixpoint();
5646   }
5647   /// See AbstractAttribute::initialize(...).
5648   ChangeStatus updateImpl(Attributor &A) override {
5649     llvm_unreachable(
5650         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5651   }
5652   /// See AbstractAttribute::trackStatistics()
5653   void trackStatistics() const override {
5654     STATS_DECLTRACK_FN_ATTR(value_simplify)
5655   }
5656 };
5657 
5658 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5659   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5660       : AAValueSimplifyFunction(IRP, A) {}
5661   /// See AbstractAttribute::trackStatistics()
5662   void trackStatistics() const override {
5663     STATS_DECLTRACK_CS_ATTR(value_simplify)
5664   }
5665 };
5666 
5667 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5668   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5669       : AAValueSimplifyImpl(IRP, A) {}
5670 
5671   void initialize(Attributor &A) override {
5672     AAValueSimplifyImpl::initialize(A);
5673     if (!getAssociatedFunction())
5674       indicatePessimisticFixpoint();
5675   }
5676 
5677   /// See AbstractAttribute::updateImpl(...).
5678   ChangeStatus updateImpl(Attributor &A) override {
5679     auto Before = SimplifiedAssociatedValue;
5680     auto &RetAA = A.getAAFor<AAReturnedValues>(
5681         *this, IRPosition::function(*getAssociatedFunction()),
5682         DepClassTy::REQUIRED);
5683     auto PredForReturned =
5684         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5685           bool UsedAssumedInformation = false;
5686           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5687               &RetVal, *cast<CallBase>(getCtxI()), *this,
5688               UsedAssumedInformation);
5689           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5690               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5691           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5692         };
5693     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5694       if (!askSimplifiedValueForOtherAAs(A))
5695         return indicatePessimisticFixpoint();
5696     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5697                                                : ChangeStatus ::CHANGED;
5698   }
5699 
5700   void trackStatistics() const override {
5701     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5702   }
5703 };
5704 
5705 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5706   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5707       : AAValueSimplifyFloating(IRP, A) {}
5708 
5709   /// See AbstractAttribute::manifest(...).
5710   ChangeStatus manifest(Attributor &A) override {
5711     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5712 
5713     if (auto *NewV = getReplacementValue(A)) {
5714       Use &U = cast<CallBase>(&getAnchorValue())
5715                    ->getArgOperandUse(getCallSiteArgNo());
5716       if (A.changeUseAfterManifest(U, *NewV))
5717         Changed = ChangeStatus::CHANGED;
5718     }
5719 
5720     return Changed | AAValueSimplify::manifest(A);
5721   }
5722 
5723   void trackStatistics() const override {
5724     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5725   }
5726 };
5727 
5728 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5729 struct AAHeapToStackFunction final : public AAHeapToStack {
5730 
5731   struct AllocationInfo {
5732     /// The call that allocates the memory.
5733     CallBase *const CB;
5734 
5735     /// The kind of allocation.
5736     const enum class AllocationKind {
5737       MALLOC,
5738       CALLOC,
5739       ALIGNED_ALLOC,
5740     } Kind;
5741 
5742     /// The library function id for the allocation.
5743     LibFunc LibraryFunctionId = NotLibFunc;
5744 
5745     /// The status wrt. a rewrite.
5746     enum {
5747       STACK_DUE_TO_USE,
5748       STACK_DUE_TO_FREE,
5749       INVALID,
5750     } Status = STACK_DUE_TO_USE;
5751 
5752     /// Flag to indicate if we encountered a use that might free this allocation
5753     /// but which is not in the deallocation infos.
5754     bool HasPotentiallyFreeingUnknownUses = false;
5755 
5756     /// The set of free calls that use this allocation.
5757     SmallPtrSet<CallBase *, 1> PotentialFreeCalls{};
5758   };
5759 
5760   struct DeallocationInfo {
5761     /// The call that deallocates the memory.
5762     CallBase *const CB;
5763 
5764     /// Flag to indicate if we don't know all objects this deallocation might
5765     /// free.
5766     bool MightFreeUnknownObjects = false;
5767 
5768     /// The set of allocation calls that are potentially freed.
5769     SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{};
5770   };
5771 
5772   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5773       : AAHeapToStack(IRP, A) {}
5774 
5775   ~AAHeapToStackFunction() {
5776     // Ensure we call the destructor so we release any memory allocated in the
5777     // sets.
5778     for (auto &It : AllocationInfos)
5779       It.getSecond()->~AllocationInfo();
5780     for (auto &It : DeallocationInfos)
5781       It.getSecond()->~DeallocationInfo();
5782   }
5783 
5784   void initialize(Attributor &A) override {
5785     AAHeapToStack::initialize(A);
5786 
5787     const Function *F = getAnchorScope();
5788     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5789 
5790     auto AllocationIdentifierCB = [&](Instruction &I) {
5791       CallBase *CB = dyn_cast<CallBase>(&I);
5792       if (!CB)
5793         return true;
5794       if (isFreeCall(CB, TLI)) {
5795         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
5796         return true;
5797       }
5798       bool IsMalloc = isMallocLikeFn(CB, TLI);
5799       bool IsAlignedAllocLike = !IsMalloc && isAlignedAllocLikeFn(CB, TLI);
5800       bool IsCalloc =
5801           !IsMalloc && !IsAlignedAllocLike && isCallocLikeFn(CB, TLI);
5802       if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc)
5803         return true;
5804       auto Kind =
5805           IsMalloc ? AllocationInfo::AllocationKind::MALLOC
5806                    : (IsCalloc ? AllocationInfo::AllocationKind::CALLOC
5807                                : AllocationInfo::AllocationKind::ALIGNED_ALLOC);
5808 
5809       AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB, Kind};
5810       AllocationInfos[CB] = AI;
5811       TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5812       return true;
5813     };
5814 
5815     bool UsedAssumedInformation = false;
5816     bool Success = A.checkForAllCallLikeInstructions(
5817         AllocationIdentifierCB, *this, UsedAssumedInformation,
5818         /* CheckBBLivenessOnly */ false,
5819         /* CheckPotentiallyDead */ true);
5820     (void)Success;
5821     assert(Success && "Did not expect the call base visit callback to fail!");
5822   }
5823 
5824   const std::string getAsStr() const override {
5825     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5826     for (const auto &It : AllocationInfos) {
5827       if (It.second->Status == AllocationInfo::INVALID)
5828         ++NumInvalidMallocs;
5829       else
5830         ++NumH2SMallocs;
5831     }
5832     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5833            std::to_string(NumInvalidMallocs);
5834   }
5835 
5836   /// See AbstractAttribute::trackStatistics().
5837   void trackStatistics() const override {
5838     STATS_DECL(
5839         MallocCalls, Function,
5840         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5841     for (auto &It : AllocationInfos)
5842       if (It.second->Status != AllocationInfo::INVALID)
5843         ++BUILD_STAT_NAME(MallocCalls, Function);
5844   }
5845 
5846   bool isAssumedHeapToStack(const CallBase &CB) const override {
5847     if (isValidState())
5848       if (AllocationInfo *AI = AllocationInfos.lookup(&CB))
5849         return AI->Status != AllocationInfo::INVALID;
5850     return false;
5851   }
5852 
5853   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
5854     if (!isValidState())
5855       return false;
5856 
5857     for (auto &It : AllocationInfos) {
5858       AllocationInfo &AI = *It.second;
5859       if (AI.Status == AllocationInfo::INVALID)
5860         continue;
5861 
5862       if (AI.PotentialFreeCalls.count(&CB))
5863         return true;
5864     }
5865 
5866     return false;
5867   }
5868 
5869   ChangeStatus manifest(Attributor &A) override {
5870     assert(getState().isValidState() &&
5871            "Attempted to manifest an invalid state!");
5872 
5873     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5874     Function *F = getAnchorScope();
5875     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5876 
5877     for (auto &It : AllocationInfos) {
5878       AllocationInfo &AI = *It.second;
5879       if (AI.Status == AllocationInfo::INVALID)
5880         continue;
5881 
5882       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
5883         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5884         A.deleteAfterManifest(*FreeCall);
5885         HasChanged = ChangeStatus::CHANGED;
5886       }
5887 
5888       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
5889                         << "\n");
5890 
5891       auto Remark = [&](OptimizationRemark OR) {
5892         LibFunc IsAllocShared;
5893         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
5894           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5895             return OR << "Moving globalized variable to the stack.";
5896         return OR << "Moving memory allocation from the heap to the stack.";
5897       };
5898       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
5899         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
5900       else
5901         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
5902 
5903       Value *Size;
5904       Optional<APInt> SizeAPI = getSize(A, *this, AI);
5905       if (SizeAPI.hasValue()) {
5906         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
5907       } else if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5908         auto *Num = AI.CB->getOperand(0);
5909         auto *SizeT = AI.CB->getOperand(1);
5910         IRBuilder<> B(AI.CB);
5911         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5912       } else if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5913         Size = AI.CB->getOperand(1);
5914       } else {
5915         Size = AI.CB->getOperand(0);
5916       }
5917 
5918       Align Alignment(1);
5919       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5920         Optional<APInt> AlignmentAPI =
5921             getAPInt(A, *this, *AI.CB->getArgOperand(0));
5922         assert(AlignmentAPI.hasValue() &&
5923                "Expected an alignment during manifest!");
5924         Alignment =
5925             max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
5926       }
5927 
5928       unsigned AS = cast<PointerType>(AI.CB->getType())->getAddressSpace();
5929       Instruction *Alloca =
5930           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5931                          "", AI.CB->getNextNode());
5932 
5933       if (Alloca->getType() != AI.CB->getType())
5934         Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc",
5935                                  Alloca->getNextNode());
5936 
5937       A.changeValueAfterManifest(*AI.CB, *Alloca);
5938 
5939       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
5940         auto *NBB = II->getNormalDest();
5941         BranchInst::Create(NBB, AI.CB->getParent());
5942         A.deleteAfterManifest(*AI.CB);
5943       } else {
5944         A.deleteAfterManifest(*AI.CB);
5945       }
5946 
5947       // Zero out the allocated memory if it was a calloc.
5948       if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5949         auto *BI = new BitCastInst(Alloca, AI.CB->getType(), "calloc_bc",
5950                                    Alloca->getNextNode());
5951         Value *Ops[] = {
5952             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5953             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5954 
5955         Type *Tys[] = {BI->getType(), AI.CB->getOperand(0)->getType()};
5956         Module *M = F->getParent();
5957         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5958         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5959       }
5960       HasChanged = ChangeStatus::CHANGED;
5961     }
5962 
5963     return HasChanged;
5964   }
5965 
5966   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
5967                            Value &V) {
5968     bool UsedAssumedInformation = false;
5969     Optional<Constant *> SimpleV =
5970         A.getAssumedConstant(V, AA, UsedAssumedInformation);
5971     if (!SimpleV.hasValue())
5972       return APInt(64, 0);
5973     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
5974       return CI->getValue();
5975     return llvm::None;
5976   }
5977 
5978   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
5979                           AllocationInfo &AI) {
5980 
5981     if (AI.Kind == AllocationInfo::AllocationKind::MALLOC)
5982       return getAPInt(A, AA, *AI.CB->getArgOperand(0));
5983 
5984     if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
5985       // Only if the alignment is also constant we return a size.
5986       return getAPInt(A, AA, *AI.CB->getArgOperand(0)).hasValue()
5987                  ? getAPInt(A, AA, *AI.CB->getArgOperand(1))
5988                  : llvm::None;
5989 
5990     assert(AI.Kind == AllocationInfo::AllocationKind::CALLOC &&
5991            "Expected only callocs are left");
5992     Optional<APInt> Num = getAPInt(A, AA, *AI.CB->getArgOperand(0));
5993     Optional<APInt> Size = getAPInt(A, AA, *AI.CB->getArgOperand(1));
5994     if (!Num.hasValue() || !Size.hasValue())
5995       return llvm::None;
5996     bool Overflow = false;
5997     Size = Size.getValue().umul_ov(Num.getValue(), Overflow);
5998     return Overflow ? llvm::None : Size;
5999   }
6000 
6001   /// Collection of all malloc-like calls in a function with associated
6002   /// information.
6003   DenseMap<CallBase *, AllocationInfo *> AllocationInfos;
6004 
6005   /// Collection of all free-like calls in a function with associated
6006   /// information.
6007   DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos;
6008 
6009   ChangeStatus updateImpl(Attributor &A) override;
6010 };
6011 
6012 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6013   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6014   const Function *F = getAnchorScope();
6015 
6016   const auto &LivenessAA =
6017       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6018 
6019   MustBeExecutedContextExplorer &Explorer =
6020       A.getInfoCache().getMustBeExecutedContextExplorer();
6021 
6022   bool StackIsAccessibleByOtherThreads =
6023       A.getInfoCache().stackIsAccessibleByOtherThreads();
6024 
6025   // Flag to ensure we update our deallocation information at most once per
6026   // updateImpl call and only if we use the free check reasoning.
6027   bool HasUpdatedFrees = false;
6028 
6029   auto UpdateFrees = [&]() {
6030     HasUpdatedFrees = true;
6031 
6032     for (auto &It : DeallocationInfos) {
6033       DeallocationInfo &DI = *It.second;
6034       // For now we cannot use deallocations that have unknown inputs, skip
6035       // them.
6036       if (DI.MightFreeUnknownObjects)
6037         continue;
6038 
6039       // No need to analyze dead calls, ignore them instead.
6040       bool UsedAssumedInformation = false;
6041       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6042                           /* CheckBBLivenessOnly */ true))
6043         continue;
6044 
6045       // Use the optimistic version to get the freed objects, ignoring dead
6046       // branches etc.
6047       SmallVector<Value *, 8> Objects;
6048       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6049                                            *this, DI.CB)) {
6050         LLVM_DEBUG(
6051             dbgs()
6052             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6053         DI.MightFreeUnknownObjects = true;
6054         continue;
6055       }
6056 
6057       // Check each object explicitly.
6058       for (auto *Obj : Objects) {
6059         // Free of null and undef can be ignored as no-ops (or UB in the latter
6060         // case).
6061         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6062           continue;
6063 
6064         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6065         if (!ObjCB) {
6066           LLVM_DEBUG(dbgs()
6067                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6068           DI.MightFreeUnknownObjects = true;
6069           continue;
6070         }
6071 
6072         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6073         if (!AI) {
6074           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6075                             << "\n");
6076           DI.MightFreeUnknownObjects = true;
6077           continue;
6078         }
6079 
6080         DI.PotentialAllocationCalls.insert(ObjCB);
6081       }
6082     }
6083   };
6084 
6085   auto FreeCheck = [&](AllocationInfo &AI) {
6086     // If the stack is not accessible by other threads, the "must-free" logic
6087     // doesn't apply as the pointer could be shared and needs to be places in
6088     // "shareable" memory.
6089     if (!StackIsAccessibleByOtherThreads) {
6090       auto &NoSyncAA =
6091           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6092       if (!NoSyncAA.isAssumedNoSync()) {
6093         LLVM_DEBUG(
6094             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6095                       "other threads and function is not nosync:\n");
6096         return false;
6097       }
6098     }
6099     if (!HasUpdatedFrees)
6100       UpdateFrees();
6101 
6102     // TODO: Allow multi exit functions that have different free calls.
6103     if (AI.PotentialFreeCalls.size() != 1) {
6104       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6105                         << AI.PotentialFreeCalls.size() << "\n");
6106       return false;
6107     }
6108     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6109     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6110     if (!DI) {
6111       LLVM_DEBUG(
6112           dbgs() << "[H2S] unique free call was not known as deallocation call "
6113                  << *UniqueFree << "\n");
6114       return false;
6115     }
6116     if (DI->MightFreeUnknownObjects) {
6117       LLVM_DEBUG(
6118           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6119       return false;
6120     }
6121     if (DI->PotentialAllocationCalls.size() > 1) {
6122       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6123                         << DI->PotentialAllocationCalls.size()
6124                         << " different allocations\n");
6125       return false;
6126     }
6127     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6128       LLVM_DEBUG(
6129           dbgs()
6130           << "[H2S] unique free call not known to free this allocation but "
6131           << **DI->PotentialAllocationCalls.begin() << "\n");
6132       return false;
6133     }
6134     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6135     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6136       LLVM_DEBUG(
6137           dbgs()
6138           << "[H2S] unique free call might not be executed with the allocation "
6139           << *UniqueFree << "\n");
6140       return false;
6141     }
6142     return true;
6143   };
6144 
6145   auto UsesCheck = [&](AllocationInfo &AI) {
6146     bool ValidUsesOnly = true;
6147 
6148     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6149       Instruction *UserI = cast<Instruction>(U.getUser());
6150       if (isa<LoadInst>(UserI))
6151         return true;
6152       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6153         if (SI->getValueOperand() == U.get()) {
6154           LLVM_DEBUG(dbgs()
6155                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6156           ValidUsesOnly = false;
6157         } else {
6158           // A store into the malloc'ed memory is fine.
6159         }
6160         return true;
6161       }
6162       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6163         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6164           return true;
6165         if (DeallocationInfos.count(CB)) {
6166           AI.PotentialFreeCalls.insert(CB);
6167           return true;
6168         }
6169 
6170         unsigned ArgNo = CB->getArgOperandNo(&U);
6171 
6172         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6173             *this, IRPosition::callsite_argument(*CB, ArgNo),
6174             DepClassTy::OPTIONAL);
6175 
6176         // If a call site argument use is nofree, we are fine.
6177         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6178             *this, IRPosition::callsite_argument(*CB, ArgNo),
6179             DepClassTy::OPTIONAL);
6180 
6181         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6182         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6183         if (MaybeCaptured ||
6184             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6185              MaybeFreed)) {
6186           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6187 
6188           // Emit a missed remark if this is missed OpenMP globalization.
6189           auto Remark = [&](OptimizationRemarkMissed ORM) {
6190             return ORM
6191                    << "Could not move globalized variable to the stack. "
6192                       "Variable is potentially captured in call. Mark "
6193                       "parameter as `__attribute__((noescape))` to override.";
6194           };
6195 
6196           if (ValidUsesOnly &&
6197               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6198             A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark);
6199 
6200           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6201           ValidUsesOnly = false;
6202         }
6203         return true;
6204       }
6205 
6206       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6207           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6208         Follow = true;
6209         return true;
6210       }
6211       // Unknown user for which we can not track uses further (in a way that
6212       // makes sense).
6213       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6214       ValidUsesOnly = false;
6215       return true;
6216     };
6217     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6218       return false;
6219     return ValidUsesOnly;
6220   };
6221 
6222   // The actual update starts here. We look at all allocations and depending on
6223   // their status perform the appropriate check(s).
6224   for (auto &It : AllocationInfos) {
6225     AllocationInfo &AI = *It.second;
6226     if (AI.Status == AllocationInfo::INVALID)
6227       continue;
6228 
6229     if (MaxHeapToStackSize == -1) {
6230       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
6231         if (!getAPInt(A, *this, *AI.CB->getArgOperand(0)).hasValue()) {
6232           LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6233                             << "\n");
6234           AI.Status = AllocationInfo::INVALID;
6235           Changed = ChangeStatus::CHANGED;
6236           continue;
6237         }
6238     } else {
6239       Optional<APInt> Size = getSize(A, *this, AI);
6240       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6241         LLVM_DEBUG({
6242           if (!Size.hasValue())
6243             dbgs() << "[H2S] Unknown allocation size (or alignment): " << *AI.CB
6244                    << "\n";
6245           else
6246             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6247                    << MaxHeapToStackSize << "\n";
6248         });
6249 
6250         AI.Status = AllocationInfo::INVALID;
6251         Changed = ChangeStatus::CHANGED;
6252         continue;
6253       }
6254     }
6255 
6256     switch (AI.Status) {
6257     case AllocationInfo::STACK_DUE_TO_USE:
6258       if (UsesCheck(AI))
6259         continue;
6260       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6261       LLVM_FALLTHROUGH;
6262     case AllocationInfo::STACK_DUE_TO_FREE:
6263       if (FreeCheck(AI))
6264         continue;
6265       AI.Status = AllocationInfo::INVALID;
6266       Changed = ChangeStatus::CHANGED;
6267       continue;
6268     case AllocationInfo::INVALID:
6269       llvm_unreachable("Invalid allocations should never reach this point!");
6270     };
6271   }
6272 
6273   return Changed;
6274 }
6275 
6276 /// ----------------------- Privatizable Pointers ------------------------------
6277 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6278   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6279       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6280 
6281   ChangeStatus indicatePessimisticFixpoint() override {
6282     AAPrivatizablePtr::indicatePessimisticFixpoint();
6283     PrivatizableType = nullptr;
6284     return ChangeStatus::CHANGED;
6285   }
6286 
6287   /// Identify the type we can chose for a private copy of the underlying
6288   /// argument. None means it is not clear yet, nullptr means there is none.
6289   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6290 
6291   /// Return a privatizable type that encloses both T0 and T1.
6292   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6293   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6294     if (!T0.hasValue())
6295       return T1;
6296     if (!T1.hasValue())
6297       return T0;
6298     if (T0 == T1)
6299       return T0;
6300     return nullptr;
6301   }
6302 
6303   Optional<Type *> getPrivatizableType() const override {
6304     return PrivatizableType;
6305   }
6306 
6307   const std::string getAsStr() const override {
6308     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6309   }
6310 
6311 protected:
6312   Optional<Type *> PrivatizableType;
6313 };
6314 
6315 // TODO: Do this for call site arguments (probably also other values) as well.
6316 
6317 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6318   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6319       : AAPrivatizablePtrImpl(IRP, A) {}
6320 
6321   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6322   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6323     // If this is a byval argument and we know all the call sites (so we can
6324     // rewrite them), there is no need to check them explicitly.
6325     bool AllCallSitesKnown;
6326     if (getIRPosition().hasAttr(Attribute::ByVal) &&
6327         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6328                                true, AllCallSitesKnown))
6329       return getAssociatedValue().getType()->getPointerElementType();
6330 
6331     Optional<Type *> Ty;
6332     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6333 
6334     // Make sure the associated call site argument has the same type at all call
6335     // sites and it is an allocation we know is safe to privatize, for now that
6336     // means we only allow alloca instructions.
6337     // TODO: We can additionally analyze the accesses in the callee to  create
6338     //       the type from that information instead. That is a little more
6339     //       involved and will be done in a follow up patch.
6340     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6341       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6342       // Check if a coresponding argument was found or if it is one not
6343       // associated (which can happen for callback calls).
6344       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6345         return false;
6346 
6347       // Check that all call sites agree on a type.
6348       auto &PrivCSArgAA =
6349           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6350       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6351 
6352       LLVM_DEBUG({
6353         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6354         if (CSTy.hasValue() && CSTy.getValue())
6355           CSTy.getValue()->print(dbgs());
6356         else if (CSTy.hasValue())
6357           dbgs() << "<nullptr>";
6358         else
6359           dbgs() << "<none>";
6360       });
6361 
6362       Ty = combineTypes(Ty, CSTy);
6363 
6364       LLVM_DEBUG({
6365         dbgs() << " : New Type: ";
6366         if (Ty.hasValue() && Ty.getValue())
6367           Ty.getValue()->print(dbgs());
6368         else if (Ty.hasValue())
6369           dbgs() << "<nullptr>";
6370         else
6371           dbgs() << "<none>";
6372         dbgs() << "\n";
6373       });
6374 
6375       return !Ty.hasValue() || Ty.getValue();
6376     };
6377 
6378     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
6379       return nullptr;
6380     return Ty;
6381   }
6382 
6383   /// See AbstractAttribute::updateImpl(...).
6384   ChangeStatus updateImpl(Attributor &A) override {
6385     PrivatizableType = identifyPrivatizableType(A);
6386     if (!PrivatizableType.hasValue())
6387       return ChangeStatus::UNCHANGED;
6388     if (!PrivatizableType.getValue())
6389       return indicatePessimisticFixpoint();
6390 
6391     // The dependence is optional so we don't give up once we give up on the
6392     // alignment.
6393     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6394                         DepClassTy::OPTIONAL);
6395 
6396     // Avoid arguments with padding for now.
6397     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6398         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6399                                                 A.getInfoCache().getDL())) {
6400       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6401       return indicatePessimisticFixpoint();
6402     }
6403 
6404     // Verify callee and caller agree on how the promoted argument would be
6405     // passed.
6406     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
6407     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
6408     // which doesn't require the arguments ArgumentPromotion wanted to pass.
6409     Function &Fn = *getIRPosition().getAnchorScope();
6410     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
6411     ArgsToPromote.insert(getAssociatedArgument());
6412     const auto *TTI =
6413         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6414     if (!TTI ||
6415         !ArgumentPromotionPass::areFunctionArgsABICompatible(
6416             Fn, *TTI, ArgsToPromote, Dummy) ||
6417         ArgsToPromote.empty()) {
6418       LLVM_DEBUG(
6419           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6420                  << Fn.getName() << "\n");
6421       return indicatePessimisticFixpoint();
6422     }
6423 
6424     // Collect the types that will replace the privatizable type in the function
6425     // signature.
6426     SmallVector<Type *, 16> ReplacementTypes;
6427     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6428 
6429     // Register a rewrite of the argument.
6430     Argument *Arg = getAssociatedArgument();
6431     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6432       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6433       return indicatePessimisticFixpoint();
6434     }
6435 
6436     unsigned ArgNo = Arg->getArgNo();
6437 
6438     // Helper to check if for the given call site the associated argument is
6439     // passed to a callback where the privatization would be different.
6440     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6441       SmallVector<const Use *, 4> CallbackUses;
6442       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6443       for (const Use *U : CallbackUses) {
6444         AbstractCallSite CBACS(U);
6445         assert(CBACS && CBACS.isCallbackCall());
6446         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6447           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6448 
6449           LLVM_DEBUG({
6450             dbgs()
6451                 << "[AAPrivatizablePtr] Argument " << *Arg
6452                 << "check if can be privatized in the context of its parent ("
6453                 << Arg->getParent()->getName()
6454                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6455                    "callback ("
6456                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6457                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6458                 << CBACS.getCallArgOperand(CBArg) << " vs "
6459                 << CB.getArgOperand(ArgNo) << "\n"
6460                 << "[AAPrivatizablePtr] " << CBArg << " : "
6461                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6462           });
6463 
6464           if (CBArgNo != int(ArgNo))
6465             continue;
6466           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6467               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6468           if (CBArgPrivAA.isValidState()) {
6469             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6470             if (!CBArgPrivTy.hasValue())
6471               continue;
6472             if (CBArgPrivTy.getValue() == PrivatizableType)
6473               continue;
6474           }
6475 
6476           LLVM_DEBUG({
6477             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6478                    << " cannot be privatized in the context of its parent ("
6479                    << Arg->getParent()->getName()
6480                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6481                       "callback ("
6482                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6483                    << ").\n[AAPrivatizablePtr] for which the argument "
6484                       "privatization is not compatible.\n";
6485           });
6486           return false;
6487         }
6488       }
6489       return true;
6490     };
6491 
6492     // Helper to check if for the given call site the associated argument is
6493     // passed to a direct call where the privatization would be different.
6494     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6495       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6496       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6497       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6498              "Expected a direct call operand for callback call operand");
6499 
6500       LLVM_DEBUG({
6501         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6502                << " check if be privatized in the context of its parent ("
6503                << Arg->getParent()->getName()
6504                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6505                   "direct call of ("
6506                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6507                << ").\n";
6508       });
6509 
6510       Function *DCCallee = DC->getCalledFunction();
6511       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6512         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6513             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6514             DepClassTy::REQUIRED);
6515         if (DCArgPrivAA.isValidState()) {
6516           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6517           if (!DCArgPrivTy.hasValue())
6518             return true;
6519           if (DCArgPrivTy.getValue() == PrivatizableType)
6520             return true;
6521         }
6522       }
6523 
6524       LLVM_DEBUG({
6525         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6526                << " cannot be privatized in the context of its parent ("
6527                << Arg->getParent()->getName()
6528                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6529                   "direct call of ("
6530                << ACS.getInstruction()->getCalledFunction()->getName()
6531                << ").\n[AAPrivatizablePtr] for which the argument "
6532                   "privatization is not compatible.\n";
6533       });
6534       return false;
6535     };
6536 
6537     // Helper to check if the associated argument is used at the given abstract
6538     // call site in a way that is incompatible with the privatization assumed
6539     // here.
6540     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6541       if (ACS.isDirectCall())
6542         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6543       if (ACS.isCallbackCall())
6544         return IsCompatiblePrivArgOfDirectCS(ACS);
6545       return false;
6546     };
6547 
6548     bool AllCallSitesKnown;
6549     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6550                                 AllCallSitesKnown))
6551       return indicatePessimisticFixpoint();
6552 
6553     return ChangeStatus::UNCHANGED;
6554   }
6555 
6556   /// Given a type to private \p PrivType, collect the constituates (which are
6557   /// used) in \p ReplacementTypes.
6558   static void
6559   identifyReplacementTypes(Type *PrivType,
6560                            SmallVectorImpl<Type *> &ReplacementTypes) {
6561     // TODO: For now we expand the privatization type to the fullest which can
6562     //       lead to dead arguments that need to be removed later.
6563     assert(PrivType && "Expected privatizable type!");
6564 
6565     // Traverse the type, extract constituate types on the outermost level.
6566     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6567       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6568         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6569     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6570       ReplacementTypes.append(PrivArrayType->getNumElements(),
6571                               PrivArrayType->getElementType());
6572     } else {
6573       ReplacementTypes.push_back(PrivType);
6574     }
6575   }
6576 
6577   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6578   /// The values needed are taken from the arguments of \p F starting at
6579   /// position \p ArgNo.
6580   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6581                                    unsigned ArgNo, Instruction &IP) {
6582     assert(PrivType && "Expected privatizable type!");
6583 
6584     IRBuilder<NoFolder> IRB(&IP);
6585     const DataLayout &DL = F.getParent()->getDataLayout();
6586 
6587     // Traverse the type, build GEPs and stores.
6588     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6589       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6590       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6591         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6592         Value *Ptr =
6593             constructPointer(PointeeTy, PrivType, &Base,
6594                              PrivStructLayout->getElementOffset(u), IRB, DL);
6595         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6596       }
6597     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6598       Type *PointeeTy = PrivArrayType->getElementType();
6599       Type *PointeePtrTy = PointeeTy->getPointerTo();
6600       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6601       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6602         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6603                                       u * PointeeTySize, IRB, DL);
6604         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6605       }
6606     } else {
6607       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6608     }
6609   }
6610 
6611   /// Extract values from \p Base according to the type \p PrivType at the
6612   /// call position \p ACS. The values are appended to \p ReplacementValues.
6613   void createReplacementValues(Align Alignment, Type *PrivType,
6614                                AbstractCallSite ACS, Value *Base,
6615                                SmallVectorImpl<Value *> &ReplacementValues) {
6616     assert(Base && "Expected base value!");
6617     assert(PrivType && "Expected privatizable type!");
6618     Instruction *IP = ACS.getInstruction();
6619 
6620     IRBuilder<NoFolder> IRB(IP);
6621     const DataLayout &DL = IP->getModule()->getDataLayout();
6622 
6623     if (Base->getType()->getPointerElementType() != PrivType)
6624       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
6625                                                  "", ACS.getInstruction());
6626 
6627     // Traverse the type, build GEPs and loads.
6628     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6629       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6630       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6631         Type *PointeeTy = PrivStructType->getElementType(u);
6632         Value *Ptr =
6633             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6634                              PrivStructLayout->getElementOffset(u), IRB, DL);
6635         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6636         L->setAlignment(Alignment);
6637         ReplacementValues.push_back(L);
6638       }
6639     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6640       Type *PointeeTy = PrivArrayType->getElementType();
6641       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6642       Type *PointeePtrTy = PointeeTy->getPointerTo();
6643       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6644         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6645                                       u * PointeeTySize, IRB, DL);
6646         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6647         L->setAlignment(Alignment);
6648         ReplacementValues.push_back(L);
6649       }
6650     } else {
6651       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6652       L->setAlignment(Alignment);
6653       ReplacementValues.push_back(L);
6654     }
6655   }
6656 
6657   /// See AbstractAttribute::manifest(...)
6658   ChangeStatus manifest(Attributor &A) override {
6659     if (!PrivatizableType.hasValue())
6660       return ChangeStatus::UNCHANGED;
6661     assert(PrivatizableType.getValue() && "Expected privatizable type!");
6662 
6663     // Collect all tail calls in the function as we cannot allow new allocas to
6664     // escape into tail recursion.
6665     // TODO: Be smarter about new allocas escaping into tail calls.
6666     SmallVector<CallInst *, 16> TailCalls;
6667     bool UsedAssumedInformation = false;
6668     if (!A.checkForAllInstructions(
6669             [&](Instruction &I) {
6670               CallInst &CI = cast<CallInst>(I);
6671               if (CI.isTailCall())
6672                 TailCalls.push_back(&CI);
6673               return true;
6674             },
6675             *this, {Instruction::Call}, UsedAssumedInformation))
6676       return ChangeStatus::UNCHANGED;
6677 
6678     Argument *Arg = getAssociatedArgument();
6679     // Query AAAlign attribute for alignment of associated argument to
6680     // determine the best alignment of loads.
6681     const auto &AlignAA =
6682         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6683 
6684     // Callback to repair the associated function. A new alloca is placed at the
6685     // beginning and initialized with the values passed through arguments. The
6686     // new alloca replaces the use of the old pointer argument.
6687     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6688         [=](const Attributor::ArgumentReplacementInfo &ARI,
6689             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6690           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6691           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6692           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
6693                                            Arg->getName() + ".priv", IP);
6694           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
6695                                ArgIt->getArgNo(), *IP);
6696 
6697           if (AI->getType() != Arg->getType())
6698             AI =
6699                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
6700           Arg->replaceAllUsesWith(AI);
6701 
6702           for (CallInst *CI : TailCalls)
6703             CI->setTailCall(false);
6704         };
6705 
6706     // Callback to repair a call site of the associated function. The elements
6707     // of the privatizable type are loaded prior to the call and passed to the
6708     // new function version.
6709     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6710         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6711                       AbstractCallSite ACS,
6712                       SmallVectorImpl<Value *> &NewArgOperands) {
6713           // When no alignment is specified for the load instruction,
6714           // natural alignment is assumed.
6715           createReplacementValues(
6716               assumeAligned(AlignAA.getAssumedAlign()),
6717               PrivatizableType.getValue(), ACS,
6718               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6719               NewArgOperands);
6720         };
6721 
6722     // Collect the types that will replace the privatizable type in the function
6723     // signature.
6724     SmallVector<Type *, 16> ReplacementTypes;
6725     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6726 
6727     // Register a rewrite of the argument.
6728     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6729                                            std::move(FnRepairCB),
6730                                            std::move(ACSRepairCB)))
6731       return ChangeStatus::CHANGED;
6732     return ChangeStatus::UNCHANGED;
6733   }
6734 
6735   /// See AbstractAttribute::trackStatistics()
6736   void trackStatistics() const override {
6737     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6738   }
6739 };
6740 
6741 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6742   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6743       : AAPrivatizablePtrImpl(IRP, A) {}
6744 
6745   /// See AbstractAttribute::initialize(...).
6746   virtual void initialize(Attributor &A) override {
6747     // TODO: We can privatize more than arguments.
6748     indicatePessimisticFixpoint();
6749   }
6750 
6751   ChangeStatus updateImpl(Attributor &A) override {
6752     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6753                      "updateImpl will not be called");
6754   }
6755 
6756   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6757   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6758     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6759     if (!Obj) {
6760       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6761       return nullptr;
6762     }
6763 
6764     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6765       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6766         if (CI->isOne())
6767           return Obj->getType()->getPointerElementType();
6768     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6769       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6770           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6771       if (PrivArgAA.isAssumedPrivatizablePtr())
6772         return Obj->getType()->getPointerElementType();
6773     }
6774 
6775     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6776                          "alloca nor privatizable argument: "
6777                       << *Obj << "!\n");
6778     return nullptr;
6779   }
6780 
6781   /// See AbstractAttribute::trackStatistics()
6782   void trackStatistics() const override {
6783     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6784   }
6785 };
6786 
6787 struct AAPrivatizablePtrCallSiteArgument final
6788     : public AAPrivatizablePtrFloating {
6789   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6790       : AAPrivatizablePtrFloating(IRP, A) {}
6791 
6792   /// See AbstractAttribute::initialize(...).
6793   void initialize(Attributor &A) override {
6794     if (getIRPosition().hasAttr(Attribute::ByVal))
6795       indicateOptimisticFixpoint();
6796   }
6797 
6798   /// See AbstractAttribute::updateImpl(...).
6799   ChangeStatus updateImpl(Attributor &A) override {
6800     PrivatizableType = identifyPrivatizableType(A);
6801     if (!PrivatizableType.hasValue())
6802       return ChangeStatus::UNCHANGED;
6803     if (!PrivatizableType.getValue())
6804       return indicatePessimisticFixpoint();
6805 
6806     const IRPosition &IRP = getIRPosition();
6807     auto &NoCaptureAA =
6808         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6809     if (!NoCaptureAA.isAssumedNoCapture()) {
6810       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6811       return indicatePessimisticFixpoint();
6812     }
6813 
6814     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6815     if (!NoAliasAA.isAssumedNoAlias()) {
6816       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6817       return indicatePessimisticFixpoint();
6818     }
6819 
6820     const auto &MemBehaviorAA =
6821         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
6822     if (!MemBehaviorAA.isAssumedReadOnly()) {
6823       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6824       return indicatePessimisticFixpoint();
6825     }
6826 
6827     return ChangeStatus::UNCHANGED;
6828   }
6829 
6830   /// See AbstractAttribute::trackStatistics()
6831   void trackStatistics() const override {
6832     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6833   }
6834 };
6835 
6836 struct AAPrivatizablePtrCallSiteReturned final
6837     : public AAPrivatizablePtrFloating {
6838   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6839       : AAPrivatizablePtrFloating(IRP, A) {}
6840 
6841   /// See AbstractAttribute::initialize(...).
6842   void initialize(Attributor &A) override {
6843     // TODO: We can privatize more than arguments.
6844     indicatePessimisticFixpoint();
6845   }
6846 
6847   /// See AbstractAttribute::trackStatistics()
6848   void trackStatistics() const override {
6849     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
6850   }
6851 };
6852 
6853 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
6854   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
6855       : AAPrivatizablePtrFloating(IRP, A) {}
6856 
6857   /// See AbstractAttribute::initialize(...).
6858   void initialize(Attributor &A) override {
6859     // TODO: We can privatize more than arguments.
6860     indicatePessimisticFixpoint();
6861   }
6862 
6863   /// See AbstractAttribute::trackStatistics()
6864   void trackStatistics() const override {
6865     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
6866   }
6867 };
6868 
6869 /// -------------------- Memory Behavior Attributes ----------------------------
6870 /// Includes read-none, read-only, and write-only.
6871 /// ----------------------------------------------------------------------------
6872 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
6873   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
6874       : AAMemoryBehavior(IRP, A) {}
6875 
6876   /// See AbstractAttribute::initialize(...).
6877   void initialize(Attributor &A) override {
6878     intersectAssumedBits(BEST_STATE);
6879     getKnownStateFromValue(getIRPosition(), getState());
6880     AAMemoryBehavior::initialize(A);
6881   }
6882 
6883   /// Return the memory behavior information encoded in the IR for \p IRP.
6884   static void getKnownStateFromValue(const IRPosition &IRP,
6885                                      BitIntegerState &State,
6886                                      bool IgnoreSubsumingPositions = false) {
6887     SmallVector<Attribute, 2> Attrs;
6888     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6889     for (const Attribute &Attr : Attrs) {
6890       switch (Attr.getKindAsEnum()) {
6891       case Attribute::ReadNone:
6892         State.addKnownBits(NO_ACCESSES);
6893         break;
6894       case Attribute::ReadOnly:
6895         State.addKnownBits(NO_WRITES);
6896         break;
6897       case Attribute::WriteOnly:
6898         State.addKnownBits(NO_READS);
6899         break;
6900       default:
6901         llvm_unreachable("Unexpected attribute!");
6902       }
6903     }
6904 
6905     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
6906       if (!I->mayReadFromMemory())
6907         State.addKnownBits(NO_READS);
6908       if (!I->mayWriteToMemory())
6909         State.addKnownBits(NO_WRITES);
6910     }
6911   }
6912 
6913   /// See AbstractAttribute::getDeducedAttributes(...).
6914   void getDeducedAttributes(LLVMContext &Ctx,
6915                             SmallVectorImpl<Attribute> &Attrs) const override {
6916     assert(Attrs.size() == 0);
6917     if (isAssumedReadNone())
6918       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6919     else if (isAssumedReadOnly())
6920       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
6921     else if (isAssumedWriteOnly())
6922       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
6923     assert(Attrs.size() <= 1);
6924   }
6925 
6926   /// See AbstractAttribute::manifest(...).
6927   ChangeStatus manifest(Attributor &A) override {
6928     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
6929       return ChangeStatus::UNCHANGED;
6930 
6931     const IRPosition &IRP = getIRPosition();
6932 
6933     // Check if we would improve the existing attributes first.
6934     SmallVector<Attribute, 4> DeducedAttrs;
6935     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6936     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6937           return IRP.hasAttr(Attr.getKindAsEnum(),
6938                              /* IgnoreSubsumingPositions */ true);
6939         }))
6940       return ChangeStatus::UNCHANGED;
6941 
6942     // Clear existing attributes.
6943     IRP.removeAttrs(AttrKinds);
6944 
6945     // Use the generic manifest method.
6946     return IRAttribute::manifest(A);
6947   }
6948 
6949   /// See AbstractState::getAsStr().
6950   const std::string getAsStr() const override {
6951     if (isAssumedReadNone())
6952       return "readnone";
6953     if (isAssumedReadOnly())
6954       return "readonly";
6955     if (isAssumedWriteOnly())
6956       return "writeonly";
6957     return "may-read/write";
6958   }
6959 
6960   /// The set of IR attributes AAMemoryBehavior deals with.
6961   static const Attribute::AttrKind AttrKinds[3];
6962 };
6963 
6964 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
6965     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
6966 
6967 /// Memory behavior attribute for a floating value.
6968 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
6969   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
6970       : AAMemoryBehaviorImpl(IRP, A) {}
6971 
6972   /// See AbstractAttribute::updateImpl(...).
6973   ChangeStatus updateImpl(Attributor &A) override;
6974 
6975   /// See AbstractAttribute::trackStatistics()
6976   void trackStatistics() const override {
6977     if (isAssumedReadNone())
6978       STATS_DECLTRACK_FLOATING_ATTR(readnone)
6979     else if (isAssumedReadOnly())
6980       STATS_DECLTRACK_FLOATING_ATTR(readonly)
6981     else if (isAssumedWriteOnly())
6982       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6983   }
6984 
6985 private:
6986   /// Return true if users of \p UserI might access the underlying
6987   /// variable/location described by \p U and should therefore be analyzed.
6988   bool followUsersOfUseIn(Attributor &A, const Use &U,
6989                           const Instruction *UserI);
6990 
6991   /// Update the state according to the effect of use \p U in \p UserI.
6992   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
6993 };
6994 
6995 /// Memory behavior attribute for function argument.
6996 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6997   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6998       : AAMemoryBehaviorFloating(IRP, A) {}
6999 
7000   /// See AbstractAttribute::initialize(...).
7001   void initialize(Attributor &A) override {
7002     intersectAssumedBits(BEST_STATE);
7003     const IRPosition &IRP = getIRPosition();
7004     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7005     // can query it when we use has/getAttr. That would allow us to reuse the
7006     // initialize of the base class here.
7007     bool HasByVal =
7008         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7009     getKnownStateFromValue(IRP, getState(),
7010                            /* IgnoreSubsumingPositions */ HasByVal);
7011 
7012     // Initialize the use vector with all direct uses of the associated value.
7013     Argument *Arg = getAssociatedArgument();
7014     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7015       indicatePessimisticFixpoint();
7016   }
7017 
7018   ChangeStatus manifest(Attributor &A) override {
7019     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7020     if (!getAssociatedValue().getType()->isPointerTy())
7021       return ChangeStatus::UNCHANGED;
7022 
7023     // TODO: From readattrs.ll: "inalloca parameters are always
7024     //                           considered written"
7025     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7026       removeKnownBits(NO_WRITES);
7027       removeAssumedBits(NO_WRITES);
7028     }
7029     return AAMemoryBehaviorFloating::manifest(A);
7030   }
7031 
7032   /// See AbstractAttribute::trackStatistics()
7033   void trackStatistics() const override {
7034     if (isAssumedReadNone())
7035       STATS_DECLTRACK_ARG_ATTR(readnone)
7036     else if (isAssumedReadOnly())
7037       STATS_DECLTRACK_ARG_ATTR(readonly)
7038     else if (isAssumedWriteOnly())
7039       STATS_DECLTRACK_ARG_ATTR(writeonly)
7040   }
7041 };
7042 
7043 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7044   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7045       : AAMemoryBehaviorArgument(IRP, A) {}
7046 
7047   /// See AbstractAttribute::initialize(...).
7048   void initialize(Attributor &A) override {
7049     // If we don't have an associated attribute this is either a variadic call
7050     // or an indirect call, either way, nothing to do here.
7051     Argument *Arg = getAssociatedArgument();
7052     if (!Arg) {
7053       indicatePessimisticFixpoint();
7054       return;
7055     }
7056     if (Arg->hasByValAttr()) {
7057       addKnownBits(NO_WRITES);
7058       removeKnownBits(NO_READS);
7059       removeAssumedBits(NO_READS);
7060     }
7061     AAMemoryBehaviorArgument::initialize(A);
7062     if (getAssociatedFunction()->isDeclaration())
7063       indicatePessimisticFixpoint();
7064   }
7065 
7066   /// See AbstractAttribute::updateImpl(...).
7067   ChangeStatus updateImpl(Attributor &A) override {
7068     // TODO: Once we have call site specific value information we can provide
7069     //       call site specific liveness liveness information and then it makes
7070     //       sense to specialize attributes for call sites arguments instead of
7071     //       redirecting requests to the callee argument.
7072     Argument *Arg = getAssociatedArgument();
7073     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7074     auto &ArgAA =
7075         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7076     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7077   }
7078 
7079   /// See AbstractAttribute::trackStatistics()
7080   void trackStatistics() const override {
7081     if (isAssumedReadNone())
7082       STATS_DECLTRACK_CSARG_ATTR(readnone)
7083     else if (isAssumedReadOnly())
7084       STATS_DECLTRACK_CSARG_ATTR(readonly)
7085     else if (isAssumedWriteOnly())
7086       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7087   }
7088 };
7089 
7090 /// Memory behavior attribute for a call site return position.
7091 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7092   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7093       : AAMemoryBehaviorFloating(IRP, A) {}
7094 
7095   /// See AbstractAttribute::initialize(...).
7096   void initialize(Attributor &A) override {
7097     AAMemoryBehaviorImpl::initialize(A);
7098     Function *F = getAssociatedFunction();
7099     if (!F || F->isDeclaration())
7100       indicatePessimisticFixpoint();
7101   }
7102 
7103   /// See AbstractAttribute::manifest(...).
7104   ChangeStatus manifest(Attributor &A) override {
7105     // We do not annotate returned values.
7106     return ChangeStatus::UNCHANGED;
7107   }
7108 
7109   /// See AbstractAttribute::trackStatistics()
7110   void trackStatistics() const override {}
7111 };
7112 
7113 /// An AA to represent the memory behavior function attributes.
7114 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7115   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7116       : AAMemoryBehaviorImpl(IRP, A) {}
7117 
7118   /// See AbstractAttribute::updateImpl(Attributor &A).
7119   virtual ChangeStatus updateImpl(Attributor &A) override;
7120 
7121   /// See AbstractAttribute::manifest(...).
7122   ChangeStatus manifest(Attributor &A) override {
7123     Function &F = cast<Function>(getAnchorValue());
7124     if (isAssumedReadNone()) {
7125       F.removeFnAttr(Attribute::ArgMemOnly);
7126       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7127       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7128     }
7129     return AAMemoryBehaviorImpl::manifest(A);
7130   }
7131 
7132   /// See AbstractAttribute::trackStatistics()
7133   void trackStatistics() const override {
7134     if (isAssumedReadNone())
7135       STATS_DECLTRACK_FN_ATTR(readnone)
7136     else if (isAssumedReadOnly())
7137       STATS_DECLTRACK_FN_ATTR(readonly)
7138     else if (isAssumedWriteOnly())
7139       STATS_DECLTRACK_FN_ATTR(writeonly)
7140   }
7141 };
7142 
7143 /// AAMemoryBehavior attribute for call sites.
7144 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7145   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7146       : AAMemoryBehaviorImpl(IRP, A) {}
7147 
7148   /// See AbstractAttribute::initialize(...).
7149   void initialize(Attributor &A) override {
7150     AAMemoryBehaviorImpl::initialize(A);
7151     Function *F = getAssociatedFunction();
7152     if (!F || F->isDeclaration())
7153       indicatePessimisticFixpoint();
7154   }
7155 
7156   /// See AbstractAttribute::updateImpl(...).
7157   ChangeStatus updateImpl(Attributor &A) override {
7158     // TODO: Once we have call site specific value information we can provide
7159     //       call site specific liveness liveness information and then it makes
7160     //       sense to specialize attributes for call sites arguments instead of
7161     //       redirecting requests to the callee argument.
7162     Function *F = getAssociatedFunction();
7163     const IRPosition &FnPos = IRPosition::function(*F);
7164     auto &FnAA =
7165         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7166     return clampStateAndIndicateChange(getState(), FnAA.getState());
7167   }
7168 
7169   /// See AbstractAttribute::trackStatistics()
7170   void trackStatistics() const override {
7171     if (isAssumedReadNone())
7172       STATS_DECLTRACK_CS_ATTR(readnone)
7173     else if (isAssumedReadOnly())
7174       STATS_DECLTRACK_CS_ATTR(readonly)
7175     else if (isAssumedWriteOnly())
7176       STATS_DECLTRACK_CS_ATTR(writeonly)
7177   }
7178 };
7179 
7180 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7181 
7182   // The current assumed state used to determine a change.
7183   auto AssumedState = getAssumed();
7184 
7185   auto CheckRWInst = [&](Instruction &I) {
7186     // If the instruction has an own memory behavior state, use it to restrict
7187     // the local state. No further analysis is required as the other memory
7188     // state is as optimistic as it gets.
7189     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7190       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7191           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7192       intersectAssumedBits(MemBehaviorAA.getAssumed());
7193       return !isAtFixpoint();
7194     }
7195 
7196     // Remove access kind modifiers if necessary.
7197     if (I.mayReadFromMemory())
7198       removeAssumedBits(NO_READS);
7199     if (I.mayWriteToMemory())
7200       removeAssumedBits(NO_WRITES);
7201     return !isAtFixpoint();
7202   };
7203 
7204   bool UsedAssumedInformation = false;
7205   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7206                                           UsedAssumedInformation))
7207     return indicatePessimisticFixpoint();
7208 
7209   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7210                                         : ChangeStatus::UNCHANGED;
7211 }
7212 
7213 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7214 
7215   const IRPosition &IRP = getIRPosition();
7216   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7217   AAMemoryBehavior::StateType &S = getState();
7218 
7219   // First, check the function scope. We take the known information and we avoid
7220   // work if the assumed information implies the current assumed information for
7221   // this attribute. This is a valid for all but byval arguments.
7222   Argument *Arg = IRP.getAssociatedArgument();
7223   AAMemoryBehavior::base_t FnMemAssumedState =
7224       AAMemoryBehavior::StateType::getWorstState();
7225   if (!Arg || !Arg->hasByValAttr()) {
7226     const auto &FnMemAA =
7227         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7228     FnMemAssumedState = FnMemAA.getAssumed();
7229     S.addKnownBits(FnMemAA.getKnown());
7230     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7231       return ChangeStatus::UNCHANGED;
7232   }
7233 
7234   // The current assumed state used to determine a change.
7235   auto AssumedState = S.getAssumed();
7236 
7237   // Make sure the value is not captured (except through "return"), if
7238   // it is, any information derived would be irrelevant anyway as we cannot
7239   // check the potential aliases introduced by the capture. However, no need
7240   // to fall back to anythign less optimistic than the function state.
7241   const auto &ArgNoCaptureAA =
7242       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7243   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7244     S.intersectAssumedBits(FnMemAssumedState);
7245     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7246                                           : ChangeStatus::UNCHANGED;
7247   }
7248 
7249   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7250   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7251     Instruction *UserI = cast<Instruction>(U.getUser());
7252     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7253                       << " \n");
7254 
7255     // Droppable users, e.g., llvm::assume does not actually perform any action.
7256     if (UserI->isDroppable())
7257       return true;
7258 
7259     // Check if the users of UserI should also be visited.
7260     Follow = followUsersOfUseIn(A, U, UserI);
7261 
7262     // If UserI might touch memory we analyze the use in detail.
7263     if (UserI->mayReadOrWriteMemory())
7264       analyzeUseIn(A, U, UserI);
7265 
7266     return !isAtFixpoint();
7267   };
7268 
7269   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7270     return indicatePessimisticFixpoint();
7271 
7272   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7273                                         : ChangeStatus::UNCHANGED;
7274 }
7275 
7276 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7277                                                   const Instruction *UserI) {
7278   // The loaded value is unrelated to the pointer argument, no need to
7279   // follow the users of the load.
7280   if (isa<LoadInst>(UserI))
7281     return false;
7282 
7283   // By default we follow all uses assuming UserI might leak information on U,
7284   // we have special handling for call sites operands though.
7285   const auto *CB = dyn_cast<CallBase>(UserI);
7286   if (!CB || !CB->isArgOperand(&U))
7287     return true;
7288 
7289   // If the use is a call argument known not to be captured, the users of
7290   // the call do not need to be visited because they have to be unrelated to
7291   // the input. Note that this check is not trivial even though we disallow
7292   // general capturing of the underlying argument. The reason is that the
7293   // call might the argument "through return", which we allow and for which we
7294   // need to check call users.
7295   if (U.get()->getType()->isPointerTy()) {
7296     unsigned ArgNo = CB->getArgOperandNo(&U);
7297     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7298         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7299     return !ArgNoCaptureAA.isAssumedNoCapture();
7300   }
7301 
7302   return true;
7303 }
7304 
7305 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7306                                             const Instruction *UserI) {
7307   assert(UserI->mayReadOrWriteMemory());
7308 
7309   switch (UserI->getOpcode()) {
7310   default:
7311     // TODO: Handle all atomics and other side-effect operations we know of.
7312     break;
7313   case Instruction::Load:
7314     // Loads cause the NO_READS property to disappear.
7315     removeAssumedBits(NO_READS);
7316     return;
7317 
7318   case Instruction::Store:
7319     // Stores cause the NO_WRITES property to disappear if the use is the
7320     // pointer operand. Note that we do assume that capturing was taken care of
7321     // somewhere else.
7322     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7323       removeAssumedBits(NO_WRITES);
7324     return;
7325 
7326   case Instruction::Call:
7327   case Instruction::CallBr:
7328   case Instruction::Invoke: {
7329     // For call sites we look at the argument memory behavior attribute (this
7330     // could be recursive!) in order to restrict our own state.
7331     const auto *CB = cast<CallBase>(UserI);
7332 
7333     // Give up on operand bundles.
7334     if (CB->isBundleOperand(&U)) {
7335       indicatePessimisticFixpoint();
7336       return;
7337     }
7338 
7339     // Calling a function does read the function pointer, maybe write it if the
7340     // function is self-modifying.
7341     if (CB->isCallee(&U)) {
7342       removeAssumedBits(NO_READS);
7343       break;
7344     }
7345 
7346     // Adjust the possible access behavior based on the information on the
7347     // argument.
7348     IRPosition Pos;
7349     if (U.get()->getType()->isPointerTy())
7350       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7351     else
7352       Pos = IRPosition::callsite_function(*CB);
7353     const auto &MemBehaviorAA =
7354         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7355     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7356     // and at least "known".
7357     intersectAssumedBits(MemBehaviorAA.getAssumed());
7358     return;
7359   }
7360   };
7361 
7362   // Generally, look at the "may-properties" and adjust the assumed state if we
7363   // did not trigger special handling before.
7364   if (UserI->mayReadFromMemory())
7365     removeAssumedBits(NO_READS);
7366   if (UserI->mayWriteToMemory())
7367     removeAssumedBits(NO_WRITES);
7368 }
7369 
7370 /// -------------------- Memory Locations Attributes ---------------------------
7371 /// Includes read-none, argmemonly, inaccessiblememonly,
7372 /// inaccessiblememorargmemonly
7373 /// ----------------------------------------------------------------------------
7374 
7375 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7376     AAMemoryLocation::MemoryLocationsKind MLK) {
7377   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7378     return "all memory";
7379   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7380     return "no memory";
7381   std::string S = "memory:";
7382   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7383     S += "stack,";
7384   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7385     S += "constant,";
7386   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7387     S += "internal global,";
7388   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7389     S += "external global,";
7390   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7391     S += "argument,";
7392   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7393     S += "inaccessible,";
7394   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7395     S += "malloced,";
7396   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7397     S += "unknown,";
7398   S.pop_back();
7399   return S;
7400 }
7401 
7402 namespace {
7403 struct AAMemoryLocationImpl : public AAMemoryLocation {
7404 
7405   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7406       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7407     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7408       AccessKind2Accesses[u] = nullptr;
7409   }
7410 
7411   ~AAMemoryLocationImpl() {
7412     // The AccessSets are allocated via a BumpPtrAllocator, we call
7413     // the destructor manually.
7414     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7415       if (AccessKind2Accesses[u])
7416         AccessKind2Accesses[u]->~AccessSet();
7417   }
7418 
7419   /// See AbstractAttribute::initialize(...).
7420   void initialize(Attributor &A) override {
7421     intersectAssumedBits(BEST_STATE);
7422     getKnownStateFromValue(A, getIRPosition(), getState());
7423     AAMemoryLocation::initialize(A);
7424   }
7425 
7426   /// Return the memory behavior information encoded in the IR for \p IRP.
7427   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7428                                      BitIntegerState &State,
7429                                      bool IgnoreSubsumingPositions = false) {
7430     // For internal functions we ignore `argmemonly` and
7431     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7432     // constant propagation. It is unclear if this is the best way but it is
7433     // unlikely this will cause real performance problems. If we are deriving
7434     // attributes for the anchor function we even remove the attribute in
7435     // addition to ignoring it.
7436     bool UseArgMemOnly = true;
7437     Function *AnchorFn = IRP.getAnchorScope();
7438     if (AnchorFn && A.isRunOn(*AnchorFn))
7439       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7440 
7441     SmallVector<Attribute, 2> Attrs;
7442     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7443     for (const Attribute &Attr : Attrs) {
7444       switch (Attr.getKindAsEnum()) {
7445       case Attribute::ReadNone:
7446         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7447         break;
7448       case Attribute::InaccessibleMemOnly:
7449         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7450         break;
7451       case Attribute::ArgMemOnly:
7452         if (UseArgMemOnly)
7453           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7454         else
7455           IRP.removeAttrs({Attribute::ArgMemOnly});
7456         break;
7457       case Attribute::InaccessibleMemOrArgMemOnly:
7458         if (UseArgMemOnly)
7459           State.addKnownBits(inverseLocation(
7460               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7461         else
7462           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7463         break;
7464       default:
7465         llvm_unreachable("Unexpected attribute!");
7466       }
7467     }
7468   }
7469 
7470   /// See AbstractAttribute::getDeducedAttributes(...).
7471   void getDeducedAttributes(LLVMContext &Ctx,
7472                             SmallVectorImpl<Attribute> &Attrs) const override {
7473     assert(Attrs.size() == 0);
7474     if (isAssumedReadNone()) {
7475       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7476     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7477       if (isAssumedInaccessibleMemOnly())
7478         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7479       else if (isAssumedArgMemOnly())
7480         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7481       else if (isAssumedInaccessibleOrArgMemOnly())
7482         Attrs.push_back(
7483             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7484     }
7485     assert(Attrs.size() <= 1);
7486   }
7487 
7488   /// See AbstractAttribute::manifest(...).
7489   ChangeStatus manifest(Attributor &A) override {
7490     const IRPosition &IRP = getIRPosition();
7491 
7492     // Check if we would improve the existing attributes first.
7493     SmallVector<Attribute, 4> DeducedAttrs;
7494     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7495     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7496           return IRP.hasAttr(Attr.getKindAsEnum(),
7497                              /* IgnoreSubsumingPositions */ true);
7498         }))
7499       return ChangeStatus::UNCHANGED;
7500 
7501     // Clear existing attributes.
7502     IRP.removeAttrs(AttrKinds);
7503     if (isAssumedReadNone())
7504       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7505 
7506     // Use the generic manifest method.
7507     return IRAttribute::manifest(A);
7508   }
7509 
7510   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7511   bool checkForAllAccessesToMemoryKind(
7512       function_ref<bool(const Instruction *, const Value *, AccessKind,
7513                         MemoryLocationsKind)>
7514           Pred,
7515       MemoryLocationsKind RequestedMLK) const override {
7516     if (!isValidState())
7517       return false;
7518 
7519     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7520     if (AssumedMLK == NO_LOCATIONS)
7521       return true;
7522 
7523     unsigned Idx = 0;
7524     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7525          CurMLK *= 2, ++Idx) {
7526       if (CurMLK & RequestedMLK)
7527         continue;
7528 
7529       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7530         for (const AccessInfo &AI : *Accesses)
7531           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7532             return false;
7533     }
7534 
7535     return true;
7536   }
7537 
7538   ChangeStatus indicatePessimisticFixpoint() override {
7539     // If we give up and indicate a pessimistic fixpoint this instruction will
7540     // become an access for all potential access kinds:
7541     // TODO: Add pointers for argmemonly and globals to improve the results of
7542     //       checkForAllAccessesToMemoryKind.
7543     bool Changed = false;
7544     MemoryLocationsKind KnownMLK = getKnown();
7545     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7546     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7547       if (!(CurMLK & KnownMLK))
7548         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7549                                   getAccessKindFromInst(I));
7550     return AAMemoryLocation::indicatePessimisticFixpoint();
7551   }
7552 
7553 protected:
7554   /// Helper struct to tie together an instruction that has a read or write
7555   /// effect with the pointer it accesses (if any).
7556   struct AccessInfo {
7557 
7558     /// The instruction that caused the access.
7559     const Instruction *I;
7560 
7561     /// The base pointer that is accessed, or null if unknown.
7562     const Value *Ptr;
7563 
7564     /// The kind of access (read/write/read+write).
7565     AccessKind Kind;
7566 
7567     bool operator==(const AccessInfo &RHS) const {
7568       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7569     }
7570     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7571       if (LHS.I != RHS.I)
7572         return LHS.I < RHS.I;
7573       if (LHS.Ptr != RHS.Ptr)
7574         return LHS.Ptr < RHS.Ptr;
7575       if (LHS.Kind != RHS.Kind)
7576         return LHS.Kind < RHS.Kind;
7577       return false;
7578     }
7579   };
7580 
7581   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7582   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7583   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7584   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7585 
7586   /// Categorize the pointer arguments of CB that might access memory in
7587   /// AccessedLoc and update the state and access map accordingly.
7588   void
7589   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7590                                      AAMemoryLocation::StateType &AccessedLocs,
7591                                      bool &Changed);
7592 
7593   /// Return the kind(s) of location that may be accessed by \p V.
7594   AAMemoryLocation::MemoryLocationsKind
7595   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7596 
7597   /// Return the access kind as determined by \p I.
7598   AccessKind getAccessKindFromInst(const Instruction *I) {
7599     AccessKind AK = READ_WRITE;
7600     if (I) {
7601       AK = I->mayReadFromMemory() ? READ : NONE;
7602       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7603     }
7604     return AK;
7605   }
7606 
7607   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7608   /// an access of kind \p AK to a \p MLK memory location with the access
7609   /// pointer \p Ptr.
7610   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7611                                  MemoryLocationsKind MLK, const Instruction *I,
7612                                  const Value *Ptr, bool &Changed,
7613                                  AccessKind AK = READ_WRITE) {
7614 
7615     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7616     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7617     if (!Accesses)
7618       Accesses = new (Allocator) AccessSet();
7619     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7620     State.removeAssumedBits(MLK);
7621   }
7622 
7623   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7624   /// arguments, and update the state and access map accordingly.
7625   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7626                           AAMemoryLocation::StateType &State, bool &Changed);
7627 
7628   /// Used to allocate access sets.
7629   BumpPtrAllocator &Allocator;
7630 
7631   /// The set of IR attributes AAMemoryLocation deals with.
7632   static const Attribute::AttrKind AttrKinds[4];
7633 };
7634 
7635 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7636     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7637     Attribute::InaccessibleMemOrArgMemOnly};
7638 
7639 void AAMemoryLocationImpl::categorizePtrValue(
7640     Attributor &A, const Instruction &I, const Value &Ptr,
7641     AAMemoryLocation::StateType &State, bool &Changed) {
7642   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7643                     << Ptr << " ["
7644                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7645 
7646   SmallVector<Value *, 8> Objects;
7647   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I)) {
7648     LLVM_DEBUG(
7649         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7650     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7651                               getAccessKindFromInst(&I));
7652     return;
7653   }
7654 
7655   for (Value *Obj : Objects) {
7656     // TODO: recognize the TBAA used for constant accesses.
7657     MemoryLocationsKind MLK = NO_LOCATIONS;
7658     assert(!isa<GEPOperator>(Obj) && "GEPs should have been stripped.");
7659     if (isa<UndefValue>(Obj))
7660       continue;
7661     if (isa<Argument>(Obj)) {
7662       // TODO: For now we do not treat byval arguments as local copies performed
7663       // on the call edge, though, we should. To make that happen we need to
7664       // teach various passes, e.g., DSE, about the copy effect of a byval. That
7665       // would also allow us to mark functions only accessing byval arguments as
7666       // readnone again, atguably their acceses have no effect outside of the
7667       // function, like accesses to allocas.
7668       MLK = NO_ARGUMENT_MEM;
7669     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7670       // Reading constant memory is not treated as a read "effect" by the
7671       // function attr pass so we won't neither. Constants defined by TBAA are
7672       // similar. (We know we do not write it because it is constant.)
7673       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7674         if (GVar->isConstant())
7675           continue;
7676 
7677       if (GV->hasLocalLinkage())
7678         MLK = NO_GLOBAL_INTERNAL_MEM;
7679       else
7680         MLK = NO_GLOBAL_EXTERNAL_MEM;
7681     } else if (isa<ConstantPointerNull>(Obj) &&
7682                !NullPointerIsDefined(getAssociatedFunction(),
7683                                      Ptr.getType()->getPointerAddressSpace())) {
7684       continue;
7685     } else if (isa<AllocaInst>(Obj)) {
7686       MLK = NO_LOCAL_MEM;
7687     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7688       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7689           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7690       if (NoAliasAA.isAssumedNoAlias())
7691         MLK = NO_MALLOCED_MEM;
7692       else
7693         MLK = NO_UNKOWN_MEM;
7694     } else {
7695       MLK = NO_UNKOWN_MEM;
7696     }
7697 
7698     assert(MLK != NO_LOCATIONS && "No location specified!");
7699     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7700                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7701                       << "\n");
7702     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7703                               getAccessKindFromInst(&I));
7704   }
7705 
7706   LLVM_DEBUG(
7707       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7708              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7709 }
7710 
7711 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7712     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7713     bool &Changed) {
7714   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7715 
7716     // Skip non-pointer arguments.
7717     const Value *ArgOp = CB.getArgOperand(ArgNo);
7718     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7719       continue;
7720 
7721     // Skip readnone arguments.
7722     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7723     const auto &ArgOpMemLocationAA =
7724         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7725 
7726     if (ArgOpMemLocationAA.isAssumedReadNone())
7727       continue;
7728 
7729     // Categorize potentially accessed pointer arguments as if there was an
7730     // access instruction with them as pointer.
7731     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7732   }
7733 }
7734 
7735 AAMemoryLocation::MemoryLocationsKind
7736 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7737                                                   bool &Changed) {
7738   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7739                     << I << "\n");
7740 
7741   AAMemoryLocation::StateType AccessedLocs;
7742   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7743 
7744   if (auto *CB = dyn_cast<CallBase>(&I)) {
7745 
7746     // First check if we assume any memory is access is visible.
7747     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7748         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7749     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7750                       << " [" << CBMemLocationAA << "]\n");
7751 
7752     if (CBMemLocationAA.isAssumedReadNone())
7753       return NO_LOCATIONS;
7754 
7755     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7756       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7757                                 Changed, getAccessKindFromInst(&I));
7758       return AccessedLocs.getAssumed();
7759     }
7760 
7761     uint32_t CBAssumedNotAccessedLocs =
7762         CBMemLocationAA.getAssumedNotAccessedLocation();
7763 
7764     // Set the argmemonly and global bit as we handle them separately below.
7765     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7766         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7767 
7768     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7769       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7770         continue;
7771       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7772                                 getAccessKindFromInst(&I));
7773     }
7774 
7775     // Now handle global memory if it might be accessed. This is slightly tricky
7776     // as NO_GLOBAL_MEM has multiple bits set.
7777     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7778     if (HasGlobalAccesses) {
7779       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7780                             AccessKind Kind, MemoryLocationsKind MLK) {
7781         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7782                                   getAccessKindFromInst(&I));
7783         return true;
7784       };
7785       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7786               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7787         return AccessedLocs.getWorstState();
7788     }
7789 
7790     LLVM_DEBUG(
7791         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7792                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7793 
7794     // Now handle argument memory if it might be accessed.
7795     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7796     if (HasArgAccesses)
7797       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7798 
7799     LLVM_DEBUG(
7800         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7801                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7802 
7803     return AccessedLocs.getAssumed();
7804   }
7805 
7806   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7807     LLVM_DEBUG(
7808         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7809                << I << " [" << *Ptr << "]\n");
7810     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7811     return AccessedLocs.getAssumed();
7812   }
7813 
7814   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7815                     << I << "\n");
7816   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7817                             getAccessKindFromInst(&I));
7818   return AccessedLocs.getAssumed();
7819 }
7820 
7821 /// An AA to represent the memory behavior function attributes.
7822 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7823   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7824       : AAMemoryLocationImpl(IRP, A) {}
7825 
7826   /// See AbstractAttribute::updateImpl(Attributor &A).
7827   virtual ChangeStatus updateImpl(Attributor &A) override {
7828 
7829     const auto &MemBehaviorAA =
7830         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7831     if (MemBehaviorAA.isAssumedReadNone()) {
7832       if (MemBehaviorAA.isKnownReadNone())
7833         return indicateOptimisticFixpoint();
7834       assert(isAssumedReadNone() &&
7835              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7836       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7837       return ChangeStatus::UNCHANGED;
7838     }
7839 
7840     // The current assumed state used to determine a change.
7841     auto AssumedState = getAssumed();
7842     bool Changed = false;
7843 
7844     auto CheckRWInst = [&](Instruction &I) {
7845       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7846       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7847                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7848       removeAssumedBits(inverseLocation(MLK, false, false));
7849       // Stop once only the valid bit set in the *not assumed location*, thus
7850       // once we don't actually exclude any memory locations in the state.
7851       return getAssumedNotAccessedLocation() != VALID_STATE;
7852     };
7853 
7854     bool UsedAssumedInformation = false;
7855     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7856                                             UsedAssumedInformation))
7857       return indicatePessimisticFixpoint();
7858 
7859     Changed |= AssumedState != getAssumed();
7860     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7861   }
7862 
7863   /// See AbstractAttribute::trackStatistics()
7864   void trackStatistics() const override {
7865     if (isAssumedReadNone())
7866       STATS_DECLTRACK_FN_ATTR(readnone)
7867     else if (isAssumedArgMemOnly())
7868       STATS_DECLTRACK_FN_ATTR(argmemonly)
7869     else if (isAssumedInaccessibleMemOnly())
7870       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7871     else if (isAssumedInaccessibleOrArgMemOnly())
7872       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7873   }
7874 };
7875 
7876 /// AAMemoryLocation attribute for call sites.
7877 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7878   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7879       : AAMemoryLocationImpl(IRP, A) {}
7880 
7881   /// See AbstractAttribute::initialize(...).
7882   void initialize(Attributor &A) override {
7883     AAMemoryLocationImpl::initialize(A);
7884     Function *F = getAssociatedFunction();
7885     if (!F || F->isDeclaration())
7886       indicatePessimisticFixpoint();
7887   }
7888 
7889   /// See AbstractAttribute::updateImpl(...).
7890   ChangeStatus updateImpl(Attributor &A) override {
7891     // TODO: Once we have call site specific value information we can provide
7892     //       call site specific liveness liveness information and then it makes
7893     //       sense to specialize attributes for call sites arguments instead of
7894     //       redirecting requests to the callee argument.
7895     Function *F = getAssociatedFunction();
7896     const IRPosition &FnPos = IRPosition::function(*F);
7897     auto &FnAA =
7898         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7899     bool Changed = false;
7900     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7901                           AccessKind Kind, MemoryLocationsKind MLK) {
7902       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7903                                 getAccessKindFromInst(I));
7904       return true;
7905     };
7906     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7907       return indicatePessimisticFixpoint();
7908     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7909   }
7910 
7911   /// See AbstractAttribute::trackStatistics()
7912   void trackStatistics() const override {
7913     if (isAssumedReadNone())
7914       STATS_DECLTRACK_CS_ATTR(readnone)
7915   }
7916 };
7917 
7918 /// ------------------ Value Constant Range Attribute -------------------------
7919 
7920 struct AAValueConstantRangeImpl : AAValueConstantRange {
7921   using StateType = IntegerRangeState;
7922   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7923       : AAValueConstantRange(IRP, A) {}
7924 
7925   /// See AbstractAttribute::initialize(..).
7926   void initialize(Attributor &A) override {
7927     if (A.hasSimplificationCallback(getIRPosition())) {
7928       indicatePessimisticFixpoint();
7929       return;
7930     }
7931 
7932     // Intersect a range given by SCEV.
7933     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7934 
7935     // Intersect a range given by LVI.
7936     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7937   }
7938 
7939   /// See AbstractAttribute::getAsStr().
7940   const std::string getAsStr() const override {
7941     std::string Str;
7942     llvm::raw_string_ostream OS(Str);
7943     OS << "range(" << getBitWidth() << ")<";
7944     getKnown().print(OS);
7945     OS << " / ";
7946     getAssumed().print(OS);
7947     OS << ">";
7948     return OS.str();
7949   }
7950 
7951   /// Helper function to get a SCEV expr for the associated value at program
7952   /// point \p I.
7953   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7954     if (!getAnchorScope())
7955       return nullptr;
7956 
7957     ScalarEvolution *SE =
7958         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7959             *getAnchorScope());
7960 
7961     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7962         *getAnchorScope());
7963 
7964     if (!SE || !LI)
7965       return nullptr;
7966 
7967     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7968     if (!I)
7969       return S;
7970 
7971     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7972   }
7973 
7974   /// Helper function to get a range from SCEV for the associated value at
7975   /// program point \p I.
7976   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7977                                          const Instruction *I = nullptr) const {
7978     if (!getAnchorScope())
7979       return getWorstState(getBitWidth());
7980 
7981     ScalarEvolution *SE =
7982         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7983             *getAnchorScope());
7984 
7985     const SCEV *S = getSCEV(A, I);
7986     if (!SE || !S)
7987       return getWorstState(getBitWidth());
7988 
7989     return SE->getUnsignedRange(S);
7990   }
7991 
7992   /// Helper function to get a range from LVI for the associated value at
7993   /// program point \p I.
7994   ConstantRange
7995   getConstantRangeFromLVI(Attributor &A,
7996                           const Instruction *CtxI = nullptr) const {
7997     if (!getAnchorScope())
7998       return getWorstState(getBitWidth());
7999 
8000     LazyValueInfo *LVI =
8001         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8002             *getAnchorScope());
8003 
8004     if (!LVI || !CtxI)
8005       return getWorstState(getBitWidth());
8006     return LVI->getConstantRange(&getAssociatedValue(),
8007                                  const_cast<Instruction *>(CtxI));
8008   }
8009 
8010   /// Return true if \p CtxI is valid for querying outside analyses.
8011   /// This basically makes sure we do not ask intra-procedural analysis
8012   /// about a context in the wrong function or a context that violates
8013   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8014   /// if the original context of this AA is OK or should be considered invalid.
8015   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8016                                                const Instruction *CtxI,
8017                                                bool AllowAACtxI) const {
8018     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8019       return false;
8020 
8021     // Our context might be in a different function, neither intra-procedural
8022     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8023     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8024       return false;
8025 
8026     // If the context is not dominated by the value there are paths to the
8027     // context that do not define the value. This cannot be handled by
8028     // LazyValueInfo so we need to bail.
8029     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8030       InformationCache &InfoCache = A.getInfoCache();
8031       const DominatorTree *DT =
8032           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8033               *I->getFunction());
8034       return DT && DT->dominates(I, CtxI);
8035     }
8036 
8037     return true;
8038   }
8039 
8040   /// See AAValueConstantRange::getKnownConstantRange(..).
8041   ConstantRange
8042   getKnownConstantRange(Attributor &A,
8043                         const Instruction *CtxI = nullptr) const override {
8044     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8045                                                  /* AllowAACtxI */ false))
8046       return getKnown();
8047 
8048     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8049     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8050     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8051   }
8052 
8053   /// See AAValueConstantRange::getAssumedConstantRange(..).
8054   ConstantRange
8055   getAssumedConstantRange(Attributor &A,
8056                           const Instruction *CtxI = nullptr) const override {
8057     // TODO: Make SCEV use Attributor assumption.
8058     //       We may be able to bound a variable range via assumptions in
8059     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8060     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8061     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8062                                                  /* AllowAACtxI */ false))
8063       return getAssumed();
8064 
8065     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8066     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8067     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8068   }
8069 
8070   /// Helper function to create MDNode for range metadata.
8071   static MDNode *
8072   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8073                             const ConstantRange &AssumedConstantRange) {
8074     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8075                                   Ty, AssumedConstantRange.getLower())),
8076                               ConstantAsMetadata::get(ConstantInt::get(
8077                                   Ty, AssumedConstantRange.getUpper()))};
8078     return MDNode::get(Ctx, LowAndHigh);
8079   }
8080 
8081   /// Return true if \p Assumed is included in \p KnownRanges.
8082   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8083 
8084     if (Assumed.isFullSet())
8085       return false;
8086 
8087     if (!KnownRanges)
8088       return true;
8089 
8090     // If multiple ranges are annotated in IR, we give up to annotate assumed
8091     // range for now.
8092 
8093     // TODO:  If there exists a known range which containts assumed range, we
8094     // can say assumed range is better.
8095     if (KnownRanges->getNumOperands() > 2)
8096       return false;
8097 
8098     ConstantInt *Lower =
8099         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8100     ConstantInt *Upper =
8101         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8102 
8103     ConstantRange Known(Lower->getValue(), Upper->getValue());
8104     return Known.contains(Assumed) && Known != Assumed;
8105   }
8106 
8107   /// Helper function to set range metadata.
8108   static bool
8109   setRangeMetadataIfisBetterRange(Instruction *I,
8110                                   const ConstantRange &AssumedConstantRange) {
8111     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8112     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8113       if (!AssumedConstantRange.isEmptySet()) {
8114         I->setMetadata(LLVMContext::MD_range,
8115                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8116                                                  AssumedConstantRange));
8117         return true;
8118       }
8119     }
8120     return false;
8121   }
8122 
8123   /// See AbstractAttribute::manifest()
8124   ChangeStatus manifest(Attributor &A) override {
8125     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8126     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8127     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8128 
8129     auto &V = getAssociatedValue();
8130     if (!AssumedConstantRange.isEmptySet() &&
8131         !AssumedConstantRange.isSingleElement()) {
8132       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8133         assert(I == getCtxI() && "Should not annotate an instruction which is "
8134                                  "not the context instruction");
8135         if (isa<CallInst>(I) || isa<LoadInst>(I))
8136           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8137             Changed = ChangeStatus::CHANGED;
8138       }
8139     }
8140 
8141     return Changed;
8142   }
8143 };
8144 
8145 struct AAValueConstantRangeArgument final
8146     : AAArgumentFromCallSiteArguments<
8147           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8148           true /* BridgeCallBaseContext */> {
8149   using Base = AAArgumentFromCallSiteArguments<
8150       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8151       true /* BridgeCallBaseContext */>;
8152   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8153       : Base(IRP, A) {}
8154 
8155   /// See AbstractAttribute::initialize(..).
8156   void initialize(Attributor &A) override {
8157     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8158       indicatePessimisticFixpoint();
8159     } else {
8160       Base::initialize(A);
8161     }
8162   }
8163 
8164   /// See AbstractAttribute::trackStatistics()
8165   void trackStatistics() const override {
8166     STATS_DECLTRACK_ARG_ATTR(value_range)
8167   }
8168 };
8169 
8170 struct AAValueConstantRangeReturned
8171     : AAReturnedFromReturnedValues<AAValueConstantRange,
8172                                    AAValueConstantRangeImpl,
8173                                    AAValueConstantRangeImpl::StateType,
8174                                    /* PropogateCallBaseContext */ true> {
8175   using Base =
8176       AAReturnedFromReturnedValues<AAValueConstantRange,
8177                                    AAValueConstantRangeImpl,
8178                                    AAValueConstantRangeImpl::StateType,
8179                                    /* PropogateCallBaseContext */ true>;
8180   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8181       : Base(IRP, A) {}
8182 
8183   /// See AbstractAttribute::initialize(...).
8184   void initialize(Attributor &A) override {}
8185 
8186   /// See AbstractAttribute::trackStatistics()
8187   void trackStatistics() const override {
8188     STATS_DECLTRACK_FNRET_ATTR(value_range)
8189   }
8190 };
8191 
8192 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8193   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8194       : AAValueConstantRangeImpl(IRP, A) {}
8195 
8196   /// See AbstractAttribute::initialize(...).
8197   void initialize(Attributor &A) override {
8198     AAValueConstantRangeImpl::initialize(A);
8199     if (isAtFixpoint())
8200       return;
8201 
8202     Value &V = getAssociatedValue();
8203 
8204     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8205       unionAssumed(ConstantRange(C->getValue()));
8206       indicateOptimisticFixpoint();
8207       return;
8208     }
8209 
8210     if (isa<UndefValue>(&V)) {
8211       // Collapse the undef state to 0.
8212       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8213       indicateOptimisticFixpoint();
8214       return;
8215     }
8216 
8217     if (isa<CallBase>(&V))
8218       return;
8219 
8220     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8221       return;
8222 
8223     // If it is a load instruction with range metadata, use it.
8224     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8225       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8226         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8227         return;
8228       }
8229 
8230     // We can work with PHI and select instruction as we traverse their operands
8231     // during update.
8232     if (isa<SelectInst>(V) || isa<PHINode>(V))
8233       return;
8234 
8235     // Otherwise we give up.
8236     indicatePessimisticFixpoint();
8237 
8238     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8239                       << getAssociatedValue() << "\n");
8240   }
8241 
8242   bool calculateBinaryOperator(
8243       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8244       const Instruction *CtxI,
8245       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8246     Value *LHS = BinOp->getOperand(0);
8247     Value *RHS = BinOp->getOperand(1);
8248 
8249     // Simplify the operands first.
8250     bool UsedAssumedInformation = false;
8251     const auto &SimplifiedLHS =
8252         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8253                                *this, UsedAssumedInformation);
8254     if (!SimplifiedLHS.hasValue())
8255       return true;
8256     if (!SimplifiedLHS.getValue())
8257       return false;
8258     LHS = *SimplifiedLHS;
8259 
8260     const auto &SimplifiedRHS =
8261         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8262                                *this, UsedAssumedInformation);
8263     if (!SimplifiedRHS.hasValue())
8264       return true;
8265     if (!SimplifiedRHS.getValue())
8266       return false;
8267     RHS = *SimplifiedRHS;
8268 
8269     // TODO: Allow non integers as well.
8270     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8271       return false;
8272 
8273     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8274         *this, IRPosition::value(*LHS, getCallBaseContext()),
8275         DepClassTy::REQUIRED);
8276     QuerriedAAs.push_back(&LHSAA);
8277     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8278 
8279     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8280         *this, IRPosition::value(*RHS, getCallBaseContext()),
8281         DepClassTy::REQUIRED);
8282     QuerriedAAs.push_back(&RHSAA);
8283     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8284 
8285     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8286 
8287     T.unionAssumed(AssumedRange);
8288 
8289     // TODO: Track a known state too.
8290 
8291     return T.isValidState();
8292   }
8293 
8294   bool calculateCastInst(
8295       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8296       const Instruction *CtxI,
8297       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8298     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8299     // TODO: Allow non integers as well.
8300     Value *OpV = CastI->getOperand(0);
8301 
8302     // Simplify the operand first.
8303     bool UsedAssumedInformation = false;
8304     const auto &SimplifiedOpV =
8305         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8306                                *this, UsedAssumedInformation);
8307     if (!SimplifiedOpV.hasValue())
8308       return true;
8309     if (!SimplifiedOpV.getValue())
8310       return false;
8311     OpV = *SimplifiedOpV;
8312 
8313     if (!OpV->getType()->isIntegerTy())
8314       return false;
8315 
8316     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8317         *this, IRPosition::value(*OpV, getCallBaseContext()),
8318         DepClassTy::REQUIRED);
8319     QuerriedAAs.push_back(&OpAA);
8320     T.unionAssumed(
8321         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8322     return T.isValidState();
8323   }
8324 
8325   bool
8326   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8327                    const Instruction *CtxI,
8328                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8329     Value *LHS = CmpI->getOperand(0);
8330     Value *RHS = CmpI->getOperand(1);
8331 
8332     // Simplify the operands first.
8333     bool UsedAssumedInformation = false;
8334     const auto &SimplifiedLHS =
8335         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8336                                *this, UsedAssumedInformation);
8337     if (!SimplifiedLHS.hasValue())
8338       return true;
8339     if (!SimplifiedLHS.getValue())
8340       return false;
8341     LHS = *SimplifiedLHS;
8342 
8343     const auto &SimplifiedRHS =
8344         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8345                                *this, UsedAssumedInformation);
8346     if (!SimplifiedRHS.hasValue())
8347       return true;
8348     if (!SimplifiedRHS.getValue())
8349       return false;
8350     RHS = *SimplifiedRHS;
8351 
8352     // TODO: Allow non integers as well.
8353     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8354       return false;
8355 
8356     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8357         *this, IRPosition::value(*LHS, getCallBaseContext()),
8358         DepClassTy::REQUIRED);
8359     QuerriedAAs.push_back(&LHSAA);
8360     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8361         *this, IRPosition::value(*RHS, getCallBaseContext()),
8362         DepClassTy::REQUIRED);
8363     QuerriedAAs.push_back(&RHSAA);
8364     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8365     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8366 
8367     // If one of them is empty set, we can't decide.
8368     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8369       return true;
8370 
8371     bool MustTrue = false, MustFalse = false;
8372 
8373     auto AllowedRegion =
8374         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8375 
8376     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8377       MustFalse = true;
8378 
8379     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8380       MustTrue = true;
8381 
8382     assert((!MustTrue || !MustFalse) &&
8383            "Either MustTrue or MustFalse should be false!");
8384 
8385     if (MustTrue)
8386       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8387     else if (MustFalse)
8388       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8389     else
8390       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8391 
8392     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8393                       << " " << RHSAA << "\n");
8394 
8395     // TODO: Track a known state too.
8396     return T.isValidState();
8397   }
8398 
8399   /// See AbstractAttribute::updateImpl(...).
8400   ChangeStatus updateImpl(Attributor &A) override {
8401     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8402                             IntegerRangeState &T, bool Stripped) -> bool {
8403       Instruction *I = dyn_cast<Instruction>(&V);
8404       if (!I || isa<CallBase>(I)) {
8405 
8406         // Simplify the operand first.
8407         bool UsedAssumedInformation = false;
8408         const auto &SimplifiedOpV =
8409             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8410                                    *this, UsedAssumedInformation);
8411         if (!SimplifiedOpV.hasValue())
8412           return true;
8413         if (!SimplifiedOpV.getValue())
8414           return false;
8415         Value *VPtr = *SimplifiedOpV;
8416 
8417         // If the value is not instruction, we query AA to Attributor.
8418         const auto &AA = A.getAAFor<AAValueConstantRange>(
8419             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8420             DepClassTy::REQUIRED);
8421 
8422         // Clamp operator is not used to utilize a program point CtxI.
8423         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8424 
8425         return T.isValidState();
8426       }
8427 
8428       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8429       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8430         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8431           return false;
8432       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8433         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8434           return false;
8435       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8436         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8437           return false;
8438       } else {
8439         // Give up with other instructions.
8440         // TODO: Add other instructions
8441 
8442         T.indicatePessimisticFixpoint();
8443         return false;
8444       }
8445 
8446       // Catch circular reasoning in a pessimistic way for now.
8447       // TODO: Check how the range evolves and if we stripped anything, see also
8448       //       AADereferenceable or AAAlign for similar situations.
8449       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8450         if (QueriedAA != this)
8451           continue;
8452         // If we are in a stady state we do not need to worry.
8453         if (T.getAssumed() == getState().getAssumed())
8454           continue;
8455         T.indicatePessimisticFixpoint();
8456       }
8457 
8458       return T.isValidState();
8459     };
8460 
8461     IntegerRangeState T(getBitWidth());
8462 
8463     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8464                                                   VisitValueCB, getCtxI(),
8465                                                   /* UseValueSimplify */ false))
8466       return indicatePessimisticFixpoint();
8467 
8468     return clampStateAndIndicateChange(getState(), T);
8469   }
8470 
8471   /// See AbstractAttribute::trackStatistics()
8472   void trackStatistics() const override {
8473     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8474   }
8475 };
8476 
8477 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8478   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8479       : AAValueConstantRangeImpl(IRP, A) {}
8480 
8481   /// See AbstractAttribute::initialize(...).
8482   ChangeStatus updateImpl(Attributor &A) override {
8483     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8484                      "not be called");
8485   }
8486 
8487   /// See AbstractAttribute::trackStatistics()
8488   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8489 };
8490 
8491 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8492   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8493       : AAValueConstantRangeFunction(IRP, A) {}
8494 
8495   /// See AbstractAttribute::trackStatistics()
8496   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8497 };
8498 
8499 struct AAValueConstantRangeCallSiteReturned
8500     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8501                                      AAValueConstantRangeImpl,
8502                                      AAValueConstantRangeImpl::StateType,
8503                                      /* IntroduceCallBaseContext */ true> {
8504   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8505       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8506                                        AAValueConstantRangeImpl,
8507                                        AAValueConstantRangeImpl::StateType,
8508                                        /* IntroduceCallBaseContext */ true>(IRP,
8509                                                                             A) {
8510   }
8511 
8512   /// See AbstractAttribute::initialize(...).
8513   void initialize(Attributor &A) override {
8514     // If it is a load instruction with range metadata, use the metadata.
8515     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8516       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8517         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8518 
8519     AAValueConstantRangeImpl::initialize(A);
8520   }
8521 
8522   /// See AbstractAttribute::trackStatistics()
8523   void trackStatistics() const override {
8524     STATS_DECLTRACK_CSRET_ATTR(value_range)
8525   }
8526 };
8527 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8528   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8529       : AAValueConstantRangeFloating(IRP, A) {}
8530 
8531   /// See AbstractAttribute::manifest()
8532   ChangeStatus manifest(Attributor &A) override {
8533     return ChangeStatus::UNCHANGED;
8534   }
8535 
8536   /// See AbstractAttribute::trackStatistics()
8537   void trackStatistics() const override {
8538     STATS_DECLTRACK_CSARG_ATTR(value_range)
8539   }
8540 };
8541 
8542 /// ------------------ Potential Values Attribute -------------------------
8543 
8544 struct AAPotentialValuesImpl : AAPotentialValues {
8545   using StateType = PotentialConstantIntValuesState;
8546 
8547   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8548       : AAPotentialValues(IRP, A) {}
8549 
8550   /// See AbstractAttribute::initialize(..).
8551   void initialize(Attributor &A) override {
8552     if (A.hasSimplificationCallback(getIRPosition()))
8553       indicatePessimisticFixpoint();
8554     else
8555       AAPotentialValues::initialize(A);
8556   }
8557 
8558   /// See AbstractAttribute::getAsStr().
8559   const std::string getAsStr() const override {
8560     std::string Str;
8561     llvm::raw_string_ostream OS(Str);
8562     OS << getState();
8563     return OS.str();
8564   }
8565 
8566   /// See AbstractAttribute::updateImpl(...).
8567   ChangeStatus updateImpl(Attributor &A) override {
8568     return indicatePessimisticFixpoint();
8569   }
8570 };
8571 
8572 struct AAPotentialValuesArgument final
8573     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8574                                       PotentialConstantIntValuesState> {
8575   using Base =
8576       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8577                                       PotentialConstantIntValuesState>;
8578   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8579       : Base(IRP, A) {}
8580 
8581   /// See AbstractAttribute::initialize(..).
8582   void initialize(Attributor &A) override {
8583     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8584       indicatePessimisticFixpoint();
8585     } else {
8586       Base::initialize(A);
8587     }
8588   }
8589 
8590   /// See AbstractAttribute::trackStatistics()
8591   void trackStatistics() const override {
8592     STATS_DECLTRACK_ARG_ATTR(potential_values)
8593   }
8594 };
8595 
8596 struct AAPotentialValuesReturned
8597     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8598   using Base =
8599       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8600   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8601       : Base(IRP, A) {}
8602 
8603   /// See AbstractAttribute::trackStatistics()
8604   void trackStatistics() const override {
8605     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8606   }
8607 };
8608 
8609 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8610   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8611       : AAPotentialValuesImpl(IRP, A) {}
8612 
8613   /// See AbstractAttribute::initialize(..).
8614   void initialize(Attributor &A) override {
8615     AAPotentialValuesImpl::initialize(A);
8616     if (isAtFixpoint())
8617       return;
8618 
8619     Value &V = getAssociatedValue();
8620 
8621     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8622       unionAssumed(C->getValue());
8623       indicateOptimisticFixpoint();
8624       return;
8625     }
8626 
8627     if (isa<UndefValue>(&V)) {
8628       unionAssumedWithUndef();
8629       indicateOptimisticFixpoint();
8630       return;
8631     }
8632 
8633     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8634       return;
8635 
8636     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8637       return;
8638 
8639     indicatePessimisticFixpoint();
8640 
8641     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8642                       << getAssociatedValue() << "\n");
8643   }
8644 
8645   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8646                                 const APInt &RHS) {
8647     ICmpInst::Predicate Pred = ICI->getPredicate();
8648     switch (Pred) {
8649     case ICmpInst::ICMP_UGT:
8650       return LHS.ugt(RHS);
8651     case ICmpInst::ICMP_SGT:
8652       return LHS.sgt(RHS);
8653     case ICmpInst::ICMP_EQ:
8654       return LHS.eq(RHS);
8655     case ICmpInst::ICMP_UGE:
8656       return LHS.uge(RHS);
8657     case ICmpInst::ICMP_SGE:
8658       return LHS.sge(RHS);
8659     case ICmpInst::ICMP_ULT:
8660       return LHS.ult(RHS);
8661     case ICmpInst::ICMP_SLT:
8662       return LHS.slt(RHS);
8663     case ICmpInst::ICMP_NE:
8664       return LHS.ne(RHS);
8665     case ICmpInst::ICMP_ULE:
8666       return LHS.ule(RHS);
8667     case ICmpInst::ICMP_SLE:
8668       return LHS.sle(RHS);
8669     default:
8670       llvm_unreachable("Invalid ICmp predicate!");
8671     }
8672   }
8673 
8674   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8675                                  uint32_t ResultBitWidth) {
8676     Instruction::CastOps CastOp = CI->getOpcode();
8677     switch (CastOp) {
8678     default:
8679       llvm_unreachable("unsupported or not integer cast");
8680     case Instruction::Trunc:
8681       return Src.trunc(ResultBitWidth);
8682     case Instruction::SExt:
8683       return Src.sext(ResultBitWidth);
8684     case Instruction::ZExt:
8685       return Src.zext(ResultBitWidth);
8686     case Instruction::BitCast:
8687       return Src;
8688     }
8689   }
8690 
8691   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8692                                        const APInt &LHS, const APInt &RHS,
8693                                        bool &SkipOperation, bool &Unsupported) {
8694     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8695     // Unsupported is set to true when the binary operator is not supported.
8696     // SkipOperation is set to true when UB occur with the given operand pair
8697     // (LHS, RHS).
8698     // TODO: we should look at nsw and nuw keywords to handle operations
8699     //       that create poison or undef value.
8700     switch (BinOpcode) {
8701     default:
8702       Unsupported = true;
8703       return LHS;
8704     case Instruction::Add:
8705       return LHS + RHS;
8706     case Instruction::Sub:
8707       return LHS - RHS;
8708     case Instruction::Mul:
8709       return LHS * RHS;
8710     case Instruction::UDiv:
8711       if (RHS.isZero()) {
8712         SkipOperation = true;
8713         return LHS;
8714       }
8715       return LHS.udiv(RHS);
8716     case Instruction::SDiv:
8717       if (RHS.isZero()) {
8718         SkipOperation = true;
8719         return LHS;
8720       }
8721       return LHS.sdiv(RHS);
8722     case Instruction::URem:
8723       if (RHS.isZero()) {
8724         SkipOperation = true;
8725         return LHS;
8726       }
8727       return LHS.urem(RHS);
8728     case Instruction::SRem:
8729       if (RHS.isZero()) {
8730         SkipOperation = true;
8731         return LHS;
8732       }
8733       return LHS.srem(RHS);
8734     case Instruction::Shl:
8735       return LHS.shl(RHS);
8736     case Instruction::LShr:
8737       return LHS.lshr(RHS);
8738     case Instruction::AShr:
8739       return LHS.ashr(RHS);
8740     case Instruction::And:
8741       return LHS & RHS;
8742     case Instruction::Or:
8743       return LHS | RHS;
8744     case Instruction::Xor:
8745       return LHS ^ RHS;
8746     }
8747   }
8748 
8749   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8750                                            const APInt &LHS, const APInt &RHS) {
8751     bool SkipOperation = false;
8752     bool Unsupported = false;
8753     APInt Result =
8754         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8755     if (Unsupported)
8756       return false;
8757     // If SkipOperation is true, we can ignore this operand pair (L, R).
8758     if (!SkipOperation)
8759       unionAssumed(Result);
8760     return isValidState();
8761   }
8762 
8763   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8764     auto AssumedBefore = getAssumed();
8765     Value *LHS = ICI->getOperand(0);
8766     Value *RHS = ICI->getOperand(1);
8767 
8768     // Simplify the operands first.
8769     bool UsedAssumedInformation = false;
8770     const auto &SimplifiedLHS =
8771         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8772                                *this, UsedAssumedInformation);
8773     if (!SimplifiedLHS.hasValue())
8774       return ChangeStatus::UNCHANGED;
8775     if (!SimplifiedLHS.getValue())
8776       return indicatePessimisticFixpoint();
8777     LHS = *SimplifiedLHS;
8778 
8779     const auto &SimplifiedRHS =
8780         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8781                                *this, UsedAssumedInformation);
8782     if (!SimplifiedRHS.hasValue())
8783       return ChangeStatus::UNCHANGED;
8784     if (!SimplifiedRHS.getValue())
8785       return indicatePessimisticFixpoint();
8786     RHS = *SimplifiedRHS;
8787 
8788     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8789       return indicatePessimisticFixpoint();
8790 
8791     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8792                                                 DepClassTy::REQUIRED);
8793     if (!LHSAA.isValidState())
8794       return indicatePessimisticFixpoint();
8795 
8796     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8797                                                 DepClassTy::REQUIRED);
8798     if (!RHSAA.isValidState())
8799       return indicatePessimisticFixpoint();
8800 
8801     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8802     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8803 
8804     // TODO: make use of undef flag to limit potential values aggressively.
8805     bool MaybeTrue = false, MaybeFalse = false;
8806     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
8807     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8808       // The result of any comparison between undefs can be soundly replaced
8809       // with undef.
8810       unionAssumedWithUndef();
8811     } else if (LHSAA.undefIsContained()) {
8812       for (const APInt &R : RHSAAPVS) {
8813         bool CmpResult = calculateICmpInst(ICI, Zero, R);
8814         MaybeTrue |= CmpResult;
8815         MaybeFalse |= !CmpResult;
8816         if (MaybeTrue & MaybeFalse)
8817           return indicatePessimisticFixpoint();
8818       }
8819     } else if (RHSAA.undefIsContained()) {
8820       for (const APInt &L : LHSAAPVS) {
8821         bool CmpResult = calculateICmpInst(ICI, L, Zero);
8822         MaybeTrue |= CmpResult;
8823         MaybeFalse |= !CmpResult;
8824         if (MaybeTrue & MaybeFalse)
8825           return indicatePessimisticFixpoint();
8826       }
8827     } else {
8828       for (const APInt &L : LHSAAPVS) {
8829         for (const APInt &R : RHSAAPVS) {
8830           bool CmpResult = calculateICmpInst(ICI, L, R);
8831           MaybeTrue |= CmpResult;
8832           MaybeFalse |= !CmpResult;
8833           if (MaybeTrue & MaybeFalse)
8834             return indicatePessimisticFixpoint();
8835         }
8836       }
8837     }
8838     if (MaybeTrue)
8839       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
8840     if (MaybeFalse)
8841       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
8842     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8843                                          : ChangeStatus::CHANGED;
8844   }
8845 
8846   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
8847     auto AssumedBefore = getAssumed();
8848     Value *LHS = SI->getTrueValue();
8849     Value *RHS = SI->getFalseValue();
8850 
8851     // Simplify the operands first.
8852     bool UsedAssumedInformation = false;
8853     const auto &SimplifiedLHS =
8854         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8855                                *this, UsedAssumedInformation);
8856     if (!SimplifiedLHS.hasValue())
8857       return ChangeStatus::UNCHANGED;
8858     if (!SimplifiedLHS.getValue())
8859       return indicatePessimisticFixpoint();
8860     LHS = *SimplifiedLHS;
8861 
8862     const auto &SimplifiedRHS =
8863         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8864                                *this, UsedAssumedInformation);
8865     if (!SimplifiedRHS.hasValue())
8866       return ChangeStatus::UNCHANGED;
8867     if (!SimplifiedRHS.getValue())
8868       return indicatePessimisticFixpoint();
8869     RHS = *SimplifiedRHS;
8870 
8871     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8872       return indicatePessimisticFixpoint();
8873 
8874     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
8875                                                   UsedAssumedInformation);
8876 
8877     // Check if we only need one operand.
8878     bool OnlyLeft = false, OnlyRight = false;
8879     if (C.hasValue() && *C && (*C)->isOneValue())
8880       OnlyLeft = true;
8881     else if (C.hasValue() && *C && (*C)->isZeroValue())
8882       OnlyRight = true;
8883 
8884     const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr;
8885     if (!OnlyRight) {
8886       LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8887                                              DepClassTy::REQUIRED);
8888       if (!LHSAA->isValidState())
8889         return indicatePessimisticFixpoint();
8890     }
8891     if (!OnlyLeft) {
8892       RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8893                                              DepClassTy::REQUIRED);
8894       if (!RHSAA->isValidState())
8895         return indicatePessimisticFixpoint();
8896     }
8897 
8898     if (!LHSAA || !RHSAA) {
8899       // select (true/false), lhs, rhs
8900       auto *OpAA = LHSAA ? LHSAA : RHSAA;
8901 
8902       if (OpAA->undefIsContained())
8903         unionAssumedWithUndef();
8904       else
8905         unionAssumed(*OpAA);
8906 
8907     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
8908       // select i1 *, undef , undef => undef
8909       unionAssumedWithUndef();
8910     } else {
8911       unionAssumed(*LHSAA);
8912       unionAssumed(*RHSAA);
8913     }
8914     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8915                                          : ChangeStatus::CHANGED;
8916   }
8917 
8918   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
8919     auto AssumedBefore = getAssumed();
8920     if (!CI->isIntegerCast())
8921       return indicatePessimisticFixpoint();
8922     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
8923     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
8924     Value *Src = CI->getOperand(0);
8925 
8926     // Simplify the operand first.
8927     bool UsedAssumedInformation = false;
8928     const auto &SimplifiedSrc =
8929         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
8930                                *this, UsedAssumedInformation);
8931     if (!SimplifiedSrc.hasValue())
8932       return ChangeStatus::UNCHANGED;
8933     if (!SimplifiedSrc.getValue())
8934       return indicatePessimisticFixpoint();
8935     Src = *SimplifiedSrc;
8936 
8937     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
8938                                                 DepClassTy::REQUIRED);
8939     if (!SrcAA.isValidState())
8940       return indicatePessimisticFixpoint();
8941     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
8942     if (SrcAA.undefIsContained())
8943       unionAssumedWithUndef();
8944     else {
8945       for (const APInt &S : SrcAAPVS) {
8946         APInt T = calculateCastInst(CI, S, ResultBitWidth);
8947         unionAssumed(T);
8948       }
8949     }
8950     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8951                                          : ChangeStatus::CHANGED;
8952   }
8953 
8954   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
8955     auto AssumedBefore = getAssumed();
8956     Value *LHS = BinOp->getOperand(0);
8957     Value *RHS = BinOp->getOperand(1);
8958 
8959     // Simplify the operands first.
8960     bool UsedAssumedInformation = false;
8961     const auto &SimplifiedLHS =
8962         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8963                                *this, UsedAssumedInformation);
8964     if (!SimplifiedLHS.hasValue())
8965       return ChangeStatus::UNCHANGED;
8966     if (!SimplifiedLHS.getValue())
8967       return indicatePessimisticFixpoint();
8968     LHS = *SimplifiedLHS;
8969 
8970     const auto &SimplifiedRHS =
8971         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8972                                *this, UsedAssumedInformation);
8973     if (!SimplifiedRHS.hasValue())
8974       return ChangeStatus::UNCHANGED;
8975     if (!SimplifiedRHS.getValue())
8976       return indicatePessimisticFixpoint();
8977     RHS = *SimplifiedRHS;
8978 
8979     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8980       return indicatePessimisticFixpoint();
8981 
8982     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8983                                                 DepClassTy::REQUIRED);
8984     if (!LHSAA.isValidState())
8985       return indicatePessimisticFixpoint();
8986 
8987     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8988                                                 DepClassTy::REQUIRED);
8989     if (!RHSAA.isValidState())
8990       return indicatePessimisticFixpoint();
8991 
8992     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8993     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8994     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
8995 
8996     // TODO: make use of undef flag to limit potential values aggressively.
8997     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8998       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
8999         return indicatePessimisticFixpoint();
9000     } else if (LHSAA.undefIsContained()) {
9001       for (const APInt &R : RHSAAPVS) {
9002         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9003           return indicatePessimisticFixpoint();
9004       }
9005     } else if (RHSAA.undefIsContained()) {
9006       for (const APInt &L : LHSAAPVS) {
9007         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9008           return indicatePessimisticFixpoint();
9009       }
9010     } else {
9011       for (const APInt &L : LHSAAPVS) {
9012         for (const APInt &R : RHSAAPVS) {
9013           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9014             return indicatePessimisticFixpoint();
9015         }
9016       }
9017     }
9018     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9019                                          : ChangeStatus::CHANGED;
9020   }
9021 
9022   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9023     auto AssumedBefore = getAssumed();
9024     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9025       Value *IncomingValue = PHI->getIncomingValue(u);
9026 
9027       // Simplify the operand first.
9028       bool UsedAssumedInformation = false;
9029       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9030           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9031           UsedAssumedInformation);
9032       if (!SimplifiedIncomingValue.hasValue())
9033         continue;
9034       if (!SimplifiedIncomingValue.getValue())
9035         return indicatePessimisticFixpoint();
9036       IncomingValue = *SimplifiedIncomingValue;
9037 
9038       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
9039           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9040       if (!PotentialValuesAA.isValidState())
9041         return indicatePessimisticFixpoint();
9042       if (PotentialValuesAA.undefIsContained())
9043         unionAssumedWithUndef();
9044       else
9045         unionAssumed(PotentialValuesAA.getAssumed());
9046     }
9047     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9048                                          : ChangeStatus::CHANGED;
9049   }
9050 
9051   ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) {
9052     if (!L.getType()->isIntegerTy())
9053       return indicatePessimisticFixpoint();
9054 
9055     auto Union = [&](Value &V) {
9056       if (isa<UndefValue>(V)) {
9057         unionAssumedWithUndef();
9058         return true;
9059       }
9060       if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) {
9061         unionAssumed(CI->getValue());
9062         return true;
9063       }
9064       return false;
9065     };
9066     auto AssumedBefore = getAssumed();
9067 
9068     if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union))
9069       return indicatePessimisticFixpoint();
9070 
9071     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9072                                          : ChangeStatus::CHANGED;
9073   }
9074 
9075   /// See AbstractAttribute::updateImpl(...).
9076   ChangeStatus updateImpl(Attributor &A) override {
9077     Value &V = getAssociatedValue();
9078     Instruction *I = dyn_cast<Instruction>(&V);
9079 
9080     if (auto *ICI = dyn_cast<ICmpInst>(I))
9081       return updateWithICmpInst(A, ICI);
9082 
9083     if (auto *SI = dyn_cast<SelectInst>(I))
9084       return updateWithSelectInst(A, SI);
9085 
9086     if (auto *CI = dyn_cast<CastInst>(I))
9087       return updateWithCastInst(A, CI);
9088 
9089     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9090       return updateWithBinaryOperator(A, BinOp);
9091 
9092     if (auto *PHI = dyn_cast<PHINode>(I))
9093       return updateWithPHINode(A, PHI);
9094 
9095     if (auto *L = dyn_cast<LoadInst>(I))
9096       return updateWithLoad(A, *L);
9097 
9098     return indicatePessimisticFixpoint();
9099   }
9100 
9101   /// See AbstractAttribute::trackStatistics()
9102   void trackStatistics() const override {
9103     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9104   }
9105 };
9106 
9107 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
9108   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
9109       : AAPotentialValuesImpl(IRP, A) {}
9110 
9111   /// See AbstractAttribute::initialize(...).
9112   ChangeStatus updateImpl(Attributor &A) override {
9113     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
9114                      "not be called");
9115   }
9116 
9117   /// See AbstractAttribute::trackStatistics()
9118   void trackStatistics() const override {
9119     STATS_DECLTRACK_FN_ATTR(potential_values)
9120   }
9121 };
9122 
9123 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
9124   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
9125       : AAPotentialValuesFunction(IRP, A) {}
9126 
9127   /// See AbstractAttribute::trackStatistics()
9128   void trackStatistics() const override {
9129     STATS_DECLTRACK_CS_ATTR(potential_values)
9130   }
9131 };
9132 
9133 struct AAPotentialValuesCallSiteReturned
9134     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
9135   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
9136       : AACallSiteReturnedFromReturned<AAPotentialValues,
9137                                        AAPotentialValuesImpl>(IRP, A) {}
9138 
9139   /// See AbstractAttribute::trackStatistics()
9140   void trackStatistics() const override {
9141     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9142   }
9143 };
9144 
9145 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
9146   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
9147       : AAPotentialValuesFloating(IRP, A) {}
9148 
9149   /// See AbstractAttribute::initialize(..).
9150   void initialize(Attributor &A) override {
9151     AAPotentialValuesImpl::initialize(A);
9152     if (isAtFixpoint())
9153       return;
9154 
9155     Value &V = getAssociatedValue();
9156 
9157     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9158       unionAssumed(C->getValue());
9159       indicateOptimisticFixpoint();
9160       return;
9161     }
9162 
9163     if (isa<UndefValue>(&V)) {
9164       unionAssumedWithUndef();
9165       indicateOptimisticFixpoint();
9166       return;
9167     }
9168   }
9169 
9170   /// See AbstractAttribute::updateImpl(...).
9171   ChangeStatus updateImpl(Attributor &A) override {
9172     Value &V = getAssociatedValue();
9173     auto AssumedBefore = getAssumed();
9174     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9175                                              DepClassTy::REQUIRED);
9176     const auto &S = AA.getAssumed();
9177     unionAssumed(S);
9178     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9179                                          : ChangeStatus::CHANGED;
9180   }
9181 
9182   /// See AbstractAttribute::trackStatistics()
9183   void trackStatistics() const override {
9184     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9185   }
9186 };
9187 
9188 /// ------------------------ NoUndef Attribute ---------------------------------
9189 struct AANoUndefImpl : AANoUndef {
9190   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9191 
9192   /// See AbstractAttribute::initialize(...).
9193   void initialize(Attributor &A) override {
9194     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9195       indicateOptimisticFixpoint();
9196       return;
9197     }
9198     Value &V = getAssociatedValue();
9199     if (isa<UndefValue>(V))
9200       indicatePessimisticFixpoint();
9201     else if (isa<FreezeInst>(V))
9202       indicateOptimisticFixpoint();
9203     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9204              isGuaranteedNotToBeUndefOrPoison(&V))
9205       indicateOptimisticFixpoint();
9206     else
9207       AANoUndef::initialize(A);
9208   }
9209 
9210   /// See followUsesInMBEC
9211   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9212                        AANoUndef::StateType &State) {
9213     const Value *UseV = U->get();
9214     const DominatorTree *DT = nullptr;
9215     AssumptionCache *AC = nullptr;
9216     InformationCache &InfoCache = A.getInfoCache();
9217     if (Function *F = getAnchorScope()) {
9218       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9219       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9220     }
9221     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9222     bool TrackUse = false;
9223     // Track use for instructions which must produce undef or poison bits when
9224     // at least one operand contains such bits.
9225     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9226       TrackUse = true;
9227     return TrackUse;
9228   }
9229 
9230   /// See AbstractAttribute::getAsStr().
9231   const std::string getAsStr() const override {
9232     return getAssumed() ? "noundef" : "may-undef-or-poison";
9233   }
9234 
9235   ChangeStatus manifest(Attributor &A) override {
9236     // We don't manifest noundef attribute for dead positions because the
9237     // associated values with dead positions would be replaced with undef
9238     // values.
9239     bool UsedAssumedInformation = false;
9240     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9241                         UsedAssumedInformation))
9242       return ChangeStatus::UNCHANGED;
9243     // A position whose simplified value does not have any value is
9244     // considered to be dead. We don't manifest noundef in such positions for
9245     // the same reason above.
9246     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9247              .hasValue())
9248       return ChangeStatus::UNCHANGED;
9249     return AANoUndef::manifest(A);
9250   }
9251 };
9252 
9253 struct AANoUndefFloating : public AANoUndefImpl {
9254   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9255       : AANoUndefImpl(IRP, A) {}
9256 
9257   /// See AbstractAttribute::initialize(...).
9258   void initialize(Attributor &A) override {
9259     AANoUndefImpl::initialize(A);
9260     if (!getState().isAtFixpoint())
9261       if (Instruction *CtxI = getCtxI())
9262         followUsesInMBEC(*this, A, getState(), *CtxI);
9263   }
9264 
9265   /// See AbstractAttribute::updateImpl(...).
9266   ChangeStatus updateImpl(Attributor &A) override {
9267     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9268                             AANoUndef::StateType &T, bool Stripped) -> bool {
9269       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9270                                              DepClassTy::REQUIRED);
9271       if (!Stripped && this == &AA) {
9272         T.indicatePessimisticFixpoint();
9273       } else {
9274         const AANoUndef::StateType &S =
9275             static_cast<const AANoUndef::StateType &>(AA.getState());
9276         T ^= S;
9277       }
9278       return T.isValidState();
9279     };
9280 
9281     StateType T;
9282     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9283                                           VisitValueCB, getCtxI()))
9284       return indicatePessimisticFixpoint();
9285 
9286     return clampStateAndIndicateChange(getState(), T);
9287   }
9288 
9289   /// See AbstractAttribute::trackStatistics()
9290   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9291 };
9292 
9293 struct AANoUndefReturned final
9294     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9295   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9296       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9297 
9298   /// See AbstractAttribute::trackStatistics()
9299   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9300 };
9301 
9302 struct AANoUndefArgument final
9303     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9304   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9305       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9306 
9307   /// See AbstractAttribute::trackStatistics()
9308   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9309 };
9310 
9311 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9312   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9313       : AANoUndefFloating(IRP, A) {}
9314 
9315   /// See AbstractAttribute::trackStatistics()
9316   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9317 };
9318 
9319 struct AANoUndefCallSiteReturned final
9320     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9321   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9322       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9323 
9324   /// See AbstractAttribute::trackStatistics()
9325   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9326 };
9327 
9328 struct AACallEdgesImpl : public AACallEdges {
9329   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9330 
9331   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9332     return CalledFunctions;
9333   }
9334 
9335   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9336 
9337   virtual bool hasNonAsmUnknownCallee() const override {
9338     return HasUnknownCalleeNonAsm;
9339   }
9340 
9341   const std::string getAsStr() const override {
9342     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9343            std::to_string(CalledFunctions.size()) + "]";
9344   }
9345 
9346   void trackStatistics() const override {}
9347 
9348 protected:
9349   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9350     if (CalledFunctions.insert(Fn)) {
9351       Change = ChangeStatus::CHANGED;
9352       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9353                         << "\n");
9354     }
9355   }
9356 
9357   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9358     if (!HasUnknownCallee)
9359       Change = ChangeStatus::CHANGED;
9360     if (NonAsm && !HasUnknownCalleeNonAsm)
9361       Change = ChangeStatus::CHANGED;
9362     HasUnknownCalleeNonAsm |= NonAsm;
9363     HasUnknownCallee = true;
9364   }
9365 
9366 private:
9367   /// Optimistic set of functions that might be called by this position.
9368   SetVector<Function *> CalledFunctions;
9369 
9370   /// Is there any call with a unknown callee.
9371   bool HasUnknownCallee = false;
9372 
9373   /// Is there any call with a unknown callee, excluding any inline asm.
9374   bool HasUnknownCalleeNonAsm = false;
9375 };
9376 
9377 struct AACallEdgesCallSite : public AACallEdgesImpl {
9378   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9379       : AACallEdgesImpl(IRP, A) {}
9380   /// See AbstractAttribute::updateImpl(...).
9381   ChangeStatus updateImpl(Attributor &A) override {
9382     ChangeStatus Change = ChangeStatus::UNCHANGED;
9383 
9384     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9385                           bool Stripped) -> bool {
9386       if (Function *Fn = dyn_cast<Function>(&V)) {
9387         addCalledFunction(Fn, Change);
9388       } else {
9389         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9390         setHasUnknownCallee(true, Change);
9391       }
9392 
9393       // Explore all values.
9394       return true;
9395     };
9396 
9397     // Process any value that we might call.
9398     auto ProcessCalledOperand = [&](Value *V) {
9399       bool DummyValue = false;
9400       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9401                                        DummyValue, VisitValue, nullptr,
9402                                        false)) {
9403         // If we haven't gone through all values, assume that there are unknown
9404         // callees.
9405         setHasUnknownCallee(true, Change);
9406       }
9407     };
9408 
9409     CallBase *CB = static_cast<CallBase *>(getCtxI());
9410 
9411     if (CB->isInlineAsm()) {
9412       setHasUnknownCallee(false, Change);
9413       return Change;
9414     }
9415 
9416     // Process callee metadata if available.
9417     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9418       for (auto &Op : MD->operands()) {
9419         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9420         if (Callee)
9421           addCalledFunction(Callee, Change);
9422       }
9423       return Change;
9424     }
9425 
9426     // The most simple case.
9427     ProcessCalledOperand(CB->getCalledOperand());
9428 
9429     // Process callback functions.
9430     SmallVector<const Use *, 4u> CallbackUses;
9431     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9432     for (const Use *U : CallbackUses)
9433       ProcessCalledOperand(U->get());
9434 
9435     return Change;
9436   }
9437 };
9438 
9439 struct AACallEdgesFunction : public AACallEdgesImpl {
9440   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9441       : AACallEdgesImpl(IRP, A) {}
9442 
9443   /// See AbstractAttribute::updateImpl(...).
9444   ChangeStatus updateImpl(Attributor &A) override {
9445     ChangeStatus Change = ChangeStatus::UNCHANGED;
9446 
9447     auto ProcessCallInst = [&](Instruction &Inst) {
9448       CallBase &CB = static_cast<CallBase &>(Inst);
9449 
9450       auto &CBEdges = A.getAAFor<AACallEdges>(
9451           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9452       if (CBEdges.hasNonAsmUnknownCallee())
9453         setHasUnknownCallee(true, Change);
9454       if (CBEdges.hasUnknownCallee())
9455         setHasUnknownCallee(false, Change);
9456 
9457       for (Function *F : CBEdges.getOptimisticEdges())
9458         addCalledFunction(F, Change);
9459 
9460       return true;
9461     };
9462 
9463     // Visit all callable instructions.
9464     bool UsedAssumedInformation = false;
9465     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9466                                            UsedAssumedInformation)) {
9467       // If we haven't looked at all call like instructions, assume that there
9468       // are unknown callees.
9469       setHasUnknownCallee(true, Change);
9470     }
9471 
9472     return Change;
9473   }
9474 };
9475 
9476 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9477 private:
9478   struct QuerySet {
9479     void markReachable(Function *Fn) {
9480       Reachable.insert(Fn);
9481       Unreachable.erase(Fn);
9482     }
9483 
9484     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9485                         ArrayRef<const AACallEdges *> AAEdgesList) {
9486       ChangeStatus Change = ChangeStatus::UNCHANGED;
9487 
9488       for (auto *AAEdges : AAEdgesList) {
9489         if (AAEdges->hasUnknownCallee()) {
9490           if (!CanReachUnknownCallee)
9491             Change = ChangeStatus::CHANGED;
9492           CanReachUnknownCallee = true;
9493           return Change;
9494         }
9495       }
9496 
9497       for (Function *Fn : make_early_inc_range(Unreachable)) {
9498         if (checkIfReachable(A, AA, AAEdgesList, Fn)) {
9499           Change = ChangeStatus::CHANGED;
9500           markReachable(Fn);
9501         }
9502       }
9503       return Change;
9504     }
9505 
9506     bool isReachable(Attributor &A, const AAFunctionReachability &AA,
9507                      ArrayRef<const AACallEdges *> AAEdgesList, Function *Fn) {
9508       // Assume that we can reach the function.
9509       // TODO: Be more specific with the unknown callee.
9510       if (CanReachUnknownCallee)
9511         return true;
9512 
9513       if (Reachable.count(Fn))
9514         return true;
9515 
9516       if (Unreachable.count(Fn))
9517         return false;
9518 
9519       // We need to assume that this function can't reach Fn to prevent
9520       // an infinite loop if this function is recursive.
9521       Unreachable.insert(Fn);
9522 
9523       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9524       if (Result)
9525         markReachable(Fn);
9526       return Result;
9527     }
9528 
9529     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9530                           ArrayRef<const AACallEdges *> AAEdgesList,
9531                           Function *Fn) const {
9532 
9533       // Handle the most trivial case first.
9534       for (auto *AAEdges : AAEdgesList) {
9535         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9536 
9537         if (Edges.count(Fn))
9538           return true;
9539       }
9540 
9541       SmallVector<const AAFunctionReachability *, 8> Deps;
9542       for (auto &AAEdges : AAEdgesList) {
9543         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9544 
9545         for (Function *Edge : Edges) {
9546           // We don't need a dependency if the result is reachable.
9547           const AAFunctionReachability &EdgeReachability =
9548               A.getAAFor<AAFunctionReachability>(
9549                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9550           Deps.push_back(&EdgeReachability);
9551 
9552           if (EdgeReachability.canReach(A, Fn))
9553             return true;
9554         }
9555       }
9556 
9557       // The result is false for now, set dependencies and leave.
9558       for (auto Dep : Deps)
9559         A.recordDependence(AA, *Dep, DepClassTy::REQUIRED);
9560 
9561       return false;
9562     }
9563 
9564     /// Set of functions that we know for sure is reachable.
9565     DenseSet<Function *> Reachable;
9566 
9567     /// Set of functions that are unreachable, but might become reachable.
9568     DenseSet<Function *> Unreachable;
9569 
9570     /// If we can reach a function with a call to a unknown function we assume
9571     /// that we can reach any function.
9572     bool CanReachUnknownCallee = false;
9573   };
9574 
9575 public:
9576   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9577       : AAFunctionReachability(IRP, A) {}
9578 
9579   bool canReach(Attributor &A, Function *Fn) const override {
9580     const AACallEdges &AAEdges =
9581         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9582 
9583     // Attributor returns attributes as const, so this function has to be
9584     // const for users of this attribute to use it without having to do
9585     // a const_cast.
9586     // This is a hack for us to be able to cache queries.
9587     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9588     bool Result =
9589         NonConstThis->WholeFunction.isReachable(A, *this, {&AAEdges}, Fn);
9590 
9591     return Result;
9592   }
9593 
9594   /// Can \p CB reach \p Fn
9595   bool canReach(Attributor &A, CallBase &CB, Function *Fn) const override {
9596     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9597         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9598 
9599     // Attributor returns attributes as const, so this function has to be
9600     // const for users of this attribute to use it without having to do
9601     // a const_cast.
9602     // This is a hack for us to be able to cache queries.
9603     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9604     QuerySet &CBQuery = NonConstThis->CBQueries[&CB];
9605 
9606     bool Result = CBQuery.isReachable(A, *this, {&AAEdges}, Fn);
9607 
9608     return Result;
9609   }
9610 
9611   /// See AbstractAttribute::updateImpl(...).
9612   ChangeStatus updateImpl(Attributor &A) override {
9613     const AACallEdges &AAEdges =
9614         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9615     ChangeStatus Change = ChangeStatus::UNCHANGED;
9616 
9617     Change |= WholeFunction.update(A, *this, {&AAEdges});
9618 
9619     for (auto CBPair : CBQueries) {
9620       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9621           *this, IRPosition::callsite_function(*CBPair.first),
9622           DepClassTy::REQUIRED);
9623 
9624       Change |= CBPair.second.update(A, *this, {&AAEdges});
9625     }
9626 
9627     return Change;
9628   }
9629 
9630   const std::string getAsStr() const override {
9631     size_t QueryCount =
9632         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
9633 
9634     return "FunctionReachability [" +
9635            std::to_string(WholeFunction.Reachable.size()) + "," +
9636            std::to_string(QueryCount) + "]";
9637   }
9638 
9639   void trackStatistics() const override {}
9640 private:
9641   bool canReachUnknownCallee() const override {
9642     return WholeFunction.CanReachUnknownCallee;
9643   }
9644 
9645   /// Used to answer if a the whole function can reacha a specific function.
9646   QuerySet WholeFunction;
9647 
9648   /// Used to answer if a call base inside this function can reach a specific
9649   /// function.
9650   DenseMap<CallBase *, QuerySet> CBQueries;
9651 };
9652 
9653 } // namespace
9654 
9655 AACallGraphNode *AACallEdgeIterator::operator*() const {
9656   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
9657       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
9658 }
9659 
9660 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
9661 
9662 const char AAReturnedValues::ID = 0;
9663 const char AANoUnwind::ID = 0;
9664 const char AANoSync::ID = 0;
9665 const char AANoFree::ID = 0;
9666 const char AANonNull::ID = 0;
9667 const char AANoRecurse::ID = 0;
9668 const char AAWillReturn::ID = 0;
9669 const char AAUndefinedBehavior::ID = 0;
9670 const char AANoAlias::ID = 0;
9671 const char AAReachability::ID = 0;
9672 const char AANoReturn::ID = 0;
9673 const char AAIsDead::ID = 0;
9674 const char AADereferenceable::ID = 0;
9675 const char AAAlign::ID = 0;
9676 const char AANoCapture::ID = 0;
9677 const char AAValueSimplify::ID = 0;
9678 const char AAHeapToStack::ID = 0;
9679 const char AAPrivatizablePtr::ID = 0;
9680 const char AAMemoryBehavior::ID = 0;
9681 const char AAMemoryLocation::ID = 0;
9682 const char AAValueConstantRange::ID = 0;
9683 const char AAPotentialValues::ID = 0;
9684 const char AANoUndef::ID = 0;
9685 const char AACallEdges::ID = 0;
9686 const char AAFunctionReachability::ID = 0;
9687 const char AAPointerInfo::ID = 0;
9688 
9689 // Macro magic to create the static generator function for attributes that
9690 // follow the naming scheme.
9691 
9692 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
9693   case IRPosition::PK:                                                         \
9694     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
9695 
9696 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
9697   case IRPosition::PK:                                                         \
9698     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
9699     ++NumAAs;                                                                  \
9700     break;
9701 
9702 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
9703   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9704     CLASS *AA = nullptr;                                                       \
9705     switch (IRP.getPositionKind()) {                                           \
9706       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9707       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9708       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9709       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9710       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9711       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9712       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9713       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9714     }                                                                          \
9715     return *AA;                                                                \
9716   }
9717 
9718 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
9719   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9720     CLASS *AA = nullptr;                                                       \
9721     switch (IRP.getPositionKind()) {                                           \
9722       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9723       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
9724       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9725       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9726       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9727       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9728       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9729       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9730     }                                                                          \
9731     return *AA;                                                                \
9732   }
9733 
9734 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
9735   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9736     CLASS *AA = nullptr;                                                       \
9737     switch (IRP.getPositionKind()) {                                           \
9738       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9739       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9740       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9741       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9742       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9743       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9744       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9745       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9746     }                                                                          \
9747     return *AA;                                                                \
9748   }
9749 
9750 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
9751   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9752     CLASS *AA = nullptr;                                                       \
9753     switch (IRP.getPositionKind()) {                                           \
9754       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9755       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9756       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9757       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9758       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9759       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9760       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9761       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9762     }                                                                          \
9763     return *AA;                                                                \
9764   }
9765 
9766 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
9767   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9768     CLASS *AA = nullptr;                                                       \
9769     switch (IRP.getPositionKind()) {                                           \
9770       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9771       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9772       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9773       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9774       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9775       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9776       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9777       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9778     }                                                                          \
9779     return *AA;                                                                \
9780   }
9781 
9782 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
9783 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
9784 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
9785 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
9786 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
9787 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
9788 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
9789 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
9790 
9791 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
9792 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
9793 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
9794 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
9795 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
9796 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
9797 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
9798 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
9799 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
9800 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
9801 
9802 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
9803 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
9804 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
9805 
9806 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
9807 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
9808 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
9809 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
9810 
9811 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
9812 
9813 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
9814 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
9815 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
9816 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
9817 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
9818 #undef SWITCH_PK_CREATE
9819 #undef SWITCH_PK_INV
9820