1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/SCCIterator.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/AssumeBundleQueries.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/InstructionSimplify.h"
25 #include "llvm/Analysis/LazyValueInfo.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
28 #include "llvm/Analysis/ScalarEvolution.h"
29 #include "llvm/Analysis/TargetTransformInfo.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/IR/Instruction.h"
34 #include "llvm/IR/Instructions.h"
35 #include "llvm/IR/IntrinsicInst.h"
36 #include "llvm/IR/NoFolder.h"
37 #include "llvm/Support/Alignment.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/ErrorHandling.h"
41 #include "llvm/Support/FileSystem.h"
42 #include "llvm/Support/raw_ostream.h"
43 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
44 #include "llvm/Transforms/Utils/Local.h"
45 #include <cassert>
46 
47 using namespace llvm;
48 
49 #define DEBUG_TYPE "attributor"
50 
51 static cl::opt<bool> ManifestInternal(
52     "attributor-manifest-internal", cl::Hidden,
53     cl::desc("Manifest Attributor internal string attributes."),
54     cl::init(false));
55 
56 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
57                                        cl::Hidden);
58 
59 template <>
60 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
61 
62 static cl::opt<unsigned, true> MaxPotentialValues(
63     "attributor-max-potential-values", cl::Hidden,
64     cl::desc("Maximum number of potential values to be "
65              "tracked for each position."),
66     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
67     cl::init(7));
68 
69 STATISTIC(NumAAs, "Number of abstract attributes created");
70 
71 // Some helper macros to deal with statistics tracking.
72 //
73 // Usage:
74 // For simple IR attribute tracking overload trackStatistics in the abstract
75 // attribute and choose the right STATS_DECLTRACK_********* macro,
76 // e.g.,:
77 //  void trackStatistics() const override {
78 //    STATS_DECLTRACK_ARG_ATTR(returned)
79 //  }
80 // If there is a single "increment" side one can use the macro
81 // STATS_DECLTRACK with a custom message. If there are multiple increment
82 // sides, STATS_DECL and STATS_TRACK can also be used separately.
83 //
84 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
85   ("Number of " #TYPE " marked '" #NAME "'")
86 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
87 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
88 #define STATS_DECL(NAME, TYPE, MSG)                                            \
89   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
90 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
91 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
92   {                                                                            \
93     STATS_DECL(NAME, TYPE, MSG)                                                \
94     STATS_TRACK(NAME, TYPE)                                                    \
95   }
96 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
97   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
98 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
99   STATS_DECLTRACK(NAME, CSArguments,                                           \
100                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
101 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
102   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
103 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
104   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
105 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
106   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
107                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
108 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
109   STATS_DECLTRACK(NAME, CSReturn,                                              \
110                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
111 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
112   STATS_DECLTRACK(NAME, Floating,                                              \
113                   ("Number of floating values known to be '" #NAME "'"))
114 
115 // Specialization of the operator<< for abstract attributes subclasses. This
116 // disambiguates situations where multiple operators are applicable.
117 namespace llvm {
118 #define PIPE_OPERATOR(CLASS)                                                   \
119   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
120     return OS << static_cast<const AbstractAttribute &>(AA);                   \
121   }
122 
123 PIPE_OPERATOR(AAIsDead)
124 PIPE_OPERATOR(AANoUnwind)
125 PIPE_OPERATOR(AANoSync)
126 PIPE_OPERATOR(AANoRecurse)
127 PIPE_OPERATOR(AAWillReturn)
128 PIPE_OPERATOR(AANoReturn)
129 PIPE_OPERATOR(AAReturnedValues)
130 PIPE_OPERATOR(AANonNull)
131 PIPE_OPERATOR(AANoAlias)
132 PIPE_OPERATOR(AADereferenceable)
133 PIPE_OPERATOR(AAAlign)
134 PIPE_OPERATOR(AANoCapture)
135 PIPE_OPERATOR(AAValueSimplify)
136 PIPE_OPERATOR(AANoFree)
137 PIPE_OPERATOR(AAHeapToStack)
138 PIPE_OPERATOR(AAReachability)
139 PIPE_OPERATOR(AAMemoryBehavior)
140 PIPE_OPERATOR(AAMemoryLocation)
141 PIPE_OPERATOR(AAValueConstantRange)
142 PIPE_OPERATOR(AAPrivatizablePtr)
143 PIPE_OPERATOR(AAUndefinedBehavior)
144 PIPE_OPERATOR(AAPotentialValues)
145 PIPE_OPERATOR(AANoUndef)
146 PIPE_OPERATOR(AACallEdges)
147 PIPE_OPERATOR(AAFunctionReachability)
148 PIPE_OPERATOR(AAPointerInfo)
149 
150 #undef PIPE_OPERATOR
151 
152 template <>
153 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
154                                                      const DerefState &R) {
155   ChangeStatus CS0 =
156       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
157   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
158   return CS0 | CS1;
159 }
160 
161 } // namespace llvm
162 
163 /// Get pointer operand of memory accessing instruction. If \p I is
164 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
165 /// is set to false and the instruction is volatile, return nullptr.
166 static const Value *getPointerOperand(const Instruction *I,
167                                       bool AllowVolatile) {
168   if (!AllowVolatile && I->isVolatile())
169     return nullptr;
170 
171   if (auto *LI = dyn_cast<LoadInst>(I)) {
172     return LI->getPointerOperand();
173   }
174 
175   if (auto *SI = dyn_cast<StoreInst>(I)) {
176     return SI->getPointerOperand();
177   }
178 
179   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
180     return CXI->getPointerOperand();
181   }
182 
183   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
184     return RMWI->getPointerOperand();
185   }
186 
187   return nullptr;
188 }
189 
190 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
191 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
192 /// getelement pointer instructions that traverse the natural type of \p Ptr if
193 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
194 /// through a cast to i8*.
195 ///
196 /// TODO: This could probably live somewhere more prominantly if it doesn't
197 ///       already exist.
198 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
199                                int64_t Offset, IRBuilder<NoFolder> &IRB,
200                                const DataLayout &DL) {
201   assert(Offset >= 0 && "Negative offset not supported yet!");
202   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
203                     << "-bytes as " << *ResTy << "\n");
204 
205   if (Offset) {
206     Type *Ty = PtrElemTy;
207     APInt IntOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), Offset);
208     SmallVector<APInt> IntIndices = DL.getGEPIndicesForOffset(Ty, IntOffset);
209 
210     SmallVector<Value *, 4> ValIndices;
211     std::string GEPName = Ptr->getName().str();
212     for (const APInt &Index : IntIndices) {
213       ValIndices.push_back(IRB.getInt(Index));
214       GEPName += "." + std::to_string(Index.getZExtValue());
215     }
216 
217     // Create a GEP for the indices collected above.
218     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, ValIndices, GEPName);
219 
220     // If an offset is left we use byte-wise adjustment.
221     if (IntOffset != 0) {
222       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
223       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt(IntOffset),
224                           GEPName + ".b" + Twine(IntOffset.getZExtValue()));
225     }
226   }
227 
228   // Ensure the result has the requested type.
229   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
230 
231   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
232   return Ptr;
233 }
234 
235 /// Recursively visit all values that might become \p IRP at some point. This
236 /// will be done by looking through cast instructions, selects, phis, and calls
237 /// with the "returned" attribute. Once we cannot look through the value any
238 /// further, the callback \p VisitValueCB is invoked and passed the current
239 /// value, the \p State, and a flag to indicate if we stripped anything.
240 /// Stripped means that we unpacked the value associated with \p IRP at least
241 /// once. Note that the value used for the callback may still be the value
242 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
243 /// we will never visit more values than specified by \p MaxValues.
244 template <typename StateTy>
245 static bool genericValueTraversal(
246     Attributor &A, IRPosition IRP, const AbstractAttribute &QueryingAA,
247     StateTy &State,
248     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
249         VisitValueCB,
250     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
251     function_ref<Value *(Value *)> StripCB = nullptr) {
252 
253   const AAIsDead *LivenessAA = nullptr;
254   if (IRP.getAnchorScope())
255     LivenessAA = &A.getAAFor<AAIsDead>(
256         QueryingAA,
257         IRPosition::function(*IRP.getAnchorScope(), IRP.getCallBaseContext()),
258         DepClassTy::NONE);
259   bool AnyDead = false;
260 
261   Value *InitialV = &IRP.getAssociatedValue();
262   using Item = std::pair<Value *, const Instruction *>;
263   SmallSet<Item, 16> Visited;
264   SmallVector<Item, 16> Worklist;
265   Worklist.push_back({InitialV, CtxI});
266 
267   int Iteration = 0;
268   do {
269     Item I = Worklist.pop_back_val();
270     Value *V = I.first;
271     CtxI = I.second;
272     if (StripCB)
273       V = StripCB(V);
274 
275     // Check if we should process the current value. To prevent endless
276     // recursion keep a record of the values we followed!
277     if (!Visited.insert(I).second)
278       continue;
279 
280     // Make sure we limit the compile time for complex expressions.
281     if (Iteration++ >= MaxValues)
282       return false;
283 
284     // Explicitly look through calls with a "returned" attribute if we do
285     // not have a pointer as stripPointerCasts only works on them.
286     Value *NewV = nullptr;
287     if (V->getType()->isPointerTy()) {
288       NewV = V->stripPointerCasts();
289     } else {
290       auto *CB = dyn_cast<CallBase>(V);
291       if (CB && CB->getCalledFunction()) {
292         for (Argument &Arg : CB->getCalledFunction()->args())
293           if (Arg.hasReturnedAttr()) {
294             NewV = CB->getArgOperand(Arg.getArgNo());
295             break;
296           }
297       }
298     }
299     if (NewV && NewV != V) {
300       Worklist.push_back({NewV, CtxI});
301       continue;
302     }
303 
304     // Look through select instructions, visit assumed potential values.
305     if (auto *SI = dyn_cast<SelectInst>(V)) {
306       bool UsedAssumedInformation = false;
307       Optional<Constant *> C = A.getAssumedConstant(
308           *SI->getCondition(), QueryingAA, UsedAssumedInformation);
309       bool NoValueYet = !C.hasValue();
310       if (NoValueYet || isa_and_nonnull<UndefValue>(*C))
311         continue;
312       if (auto *CI = dyn_cast_or_null<ConstantInt>(*C)) {
313         if (CI->isZero())
314           Worklist.push_back({SI->getFalseValue(), CtxI});
315         else
316           Worklist.push_back({SI->getTrueValue(), CtxI});
317         continue;
318       }
319       // We could not simplify the condition, assume both values.(
320       Worklist.push_back({SI->getTrueValue(), CtxI});
321       Worklist.push_back({SI->getFalseValue(), CtxI});
322       continue;
323     }
324 
325     // Look through phi nodes, visit all live operands.
326     if (auto *PHI = dyn_cast<PHINode>(V)) {
327       assert(LivenessAA &&
328              "Expected liveness in the presence of instructions!");
329       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
330         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
331         bool UsedAssumedInformation = false;
332         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
333                             LivenessAA, UsedAssumedInformation,
334                             /* CheckBBLivenessOnly */ true)) {
335           AnyDead = true;
336           continue;
337         }
338         Worklist.push_back(
339             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
340       }
341       continue;
342     }
343 
344     if (UseValueSimplify && !isa<Constant>(V)) {
345       bool UsedAssumedInformation = false;
346       Optional<Value *> SimpleV =
347           A.getAssumedSimplified(*V, QueryingAA, UsedAssumedInformation);
348       if (!SimpleV.hasValue())
349         continue;
350       if (!SimpleV.getValue())
351         return false;
352       Value *NewV = SimpleV.getValue();
353       if (NewV != V) {
354         Worklist.push_back({NewV, CtxI});
355         continue;
356       }
357     }
358 
359     // Once a leaf is reached we inform the user through the callback.
360     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
361       return false;
362   } while (!Worklist.empty());
363 
364   // If we actually used liveness information so we have to record a dependence.
365   if (AnyDead)
366     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
367 
368   // All values have been visited.
369   return true;
370 }
371 
372 bool AA::getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
373                                      SmallVectorImpl<Value *> &Objects,
374                                      const AbstractAttribute &QueryingAA,
375                                      const Instruction *CtxI) {
376   auto StripCB = [&](Value *V) { return getUnderlyingObject(V); };
377   SmallPtrSet<Value *, 8> SeenObjects;
378   auto VisitValueCB = [&SeenObjects](Value &Val, const Instruction *,
379                                      SmallVectorImpl<Value *> &Objects,
380                                      bool) -> bool {
381     if (SeenObjects.insert(&Val).second)
382       Objects.push_back(&Val);
383     return true;
384   };
385   if (!genericValueTraversal<decltype(Objects)>(
386           A, IRPosition::value(Ptr), QueryingAA, Objects, VisitValueCB, CtxI,
387           true, 32, StripCB))
388     return false;
389   return true;
390 }
391 
392 const Value *stripAndAccumulateMinimalOffsets(
393     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
394     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
395     bool UseAssumed = false) {
396 
397   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
398     const IRPosition &Pos = IRPosition::value(V);
399     // Only track dependence if we are going to use the assumed info.
400     const AAValueConstantRange &ValueConstantRangeAA =
401         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
402                                          UseAssumed ? DepClassTy::OPTIONAL
403                                                     : DepClassTy::NONE);
404     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
405                                      : ValueConstantRangeAA.getKnown();
406     // We can only use the lower part of the range because the upper part can
407     // be higher than what the value can really be.
408     ROffset = Range.getSignedMin();
409     return true;
410   };
411 
412   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
413                                                 /* AllowInvariant */ false,
414                                                 AttributorAnalysis);
415 }
416 
417 static const Value *getMinimalBaseOfAccsesPointerOperand(
418     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
419     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
420   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
421   if (!Ptr)
422     return nullptr;
423   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
424   const Value *Base = stripAndAccumulateMinimalOffsets(
425       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
426 
427   BytesOffset = OffsetAPInt.getSExtValue();
428   return Base;
429 }
430 
431 static const Value *
432 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
433                                      const DataLayout &DL,
434                                      bool AllowNonInbounds = false) {
435   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
436   if (!Ptr)
437     return nullptr;
438 
439   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
440                                           AllowNonInbounds);
441 }
442 
443 /// Clamp the information known for all returned values of a function
444 /// (identified by \p QueryingAA) into \p S.
445 template <typename AAType, typename StateType = typename AAType::StateType>
446 static void clampReturnedValueStates(
447     Attributor &A, const AAType &QueryingAA, StateType &S,
448     const IRPosition::CallBaseContext *CBContext = nullptr) {
449   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
450                     << QueryingAA << " into " << S << "\n");
451 
452   assert((QueryingAA.getIRPosition().getPositionKind() ==
453               IRPosition::IRP_RETURNED ||
454           QueryingAA.getIRPosition().getPositionKind() ==
455               IRPosition::IRP_CALL_SITE_RETURNED) &&
456          "Can only clamp returned value states for a function returned or call "
457          "site returned position!");
458 
459   // Use an optional state as there might not be any return values and we want
460   // to join (IntegerState::operator&) the state of all there are.
461   Optional<StateType> T;
462 
463   // Callback for each possibly returned value.
464   auto CheckReturnValue = [&](Value &RV) -> bool {
465     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
466     const AAType &AA =
467         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
468     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
469                       << " @ " << RVPos << "\n");
470     const StateType &AAS = AA.getState();
471     if (T.hasValue())
472       *T &= AAS;
473     else
474       T = AAS;
475     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
476                       << "\n");
477     return T->isValidState();
478   };
479 
480   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
481     S.indicatePessimisticFixpoint();
482   else if (T.hasValue())
483     S ^= *T;
484 }
485 
486 /// Helper class for generic deduction: return value -> returned position.
487 template <typename AAType, typename BaseType,
488           typename StateType = typename BaseType::StateType,
489           bool PropagateCallBaseContext = false>
490 struct AAReturnedFromReturnedValues : public BaseType {
491   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
492       : BaseType(IRP, A) {}
493 
494   /// See AbstractAttribute::updateImpl(...).
495   ChangeStatus updateImpl(Attributor &A) override {
496     StateType S(StateType::getBestState(this->getState()));
497     clampReturnedValueStates<AAType, StateType>(
498         A, *this, S,
499         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
500     // TODO: If we know we visited all returned values, thus no are assumed
501     // dead, we can take the known information from the state T.
502     return clampStateAndIndicateChange<StateType>(this->getState(), S);
503   }
504 };
505 
506 /// Clamp the information known at all call sites for a given argument
507 /// (identified by \p QueryingAA) into \p S.
508 template <typename AAType, typename StateType = typename AAType::StateType>
509 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
510                                         StateType &S) {
511   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
512                     << QueryingAA << " into " << S << "\n");
513 
514   assert(QueryingAA.getIRPosition().getPositionKind() ==
515              IRPosition::IRP_ARGUMENT &&
516          "Can only clamp call site argument states for an argument position!");
517 
518   // Use an optional state as there might not be any return values and we want
519   // to join (IntegerState::operator&) the state of all there are.
520   Optional<StateType> T;
521 
522   // The argument number which is also the call site argument number.
523   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
524 
525   auto CallSiteCheck = [&](AbstractCallSite ACS) {
526     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
527     // Check if a coresponding argument was found or if it is on not associated
528     // (which can happen for callback calls).
529     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
530       return false;
531 
532     const AAType &AA =
533         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
534     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
535                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
536     const StateType &AAS = AA.getState();
537     if (T.hasValue())
538       *T &= AAS;
539     else
540       T = AAS;
541     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
542                       << "\n");
543     return T->isValidState();
544   };
545 
546   bool AllCallSitesKnown;
547   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
548                               AllCallSitesKnown))
549     S.indicatePessimisticFixpoint();
550   else if (T.hasValue())
551     S ^= *T;
552 }
553 
554 /// This function is the bridge between argument position and the call base
555 /// context.
556 template <typename AAType, typename BaseType,
557           typename StateType = typename AAType::StateType>
558 bool getArgumentStateFromCallBaseContext(Attributor &A,
559                                          BaseType &QueryingAttribute,
560                                          IRPosition &Pos, StateType &State) {
561   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
562          "Expected an 'argument' position !");
563   const CallBase *CBContext = Pos.getCallBaseContext();
564   if (!CBContext)
565     return false;
566 
567   int ArgNo = Pos.getCallSiteArgNo();
568   assert(ArgNo >= 0 && "Invalid Arg No!");
569 
570   const auto &AA = A.getAAFor<AAType>(
571       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
572       DepClassTy::REQUIRED);
573   const StateType &CBArgumentState =
574       static_cast<const StateType &>(AA.getState());
575 
576   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
577                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
578                     << "\n");
579 
580   // NOTE: If we want to do call site grouping it should happen here.
581   State ^= CBArgumentState;
582   return true;
583 }
584 
585 /// Helper class for generic deduction: call site argument -> argument position.
586 template <typename AAType, typename BaseType,
587           typename StateType = typename AAType::StateType,
588           bool BridgeCallBaseContext = false>
589 struct AAArgumentFromCallSiteArguments : public BaseType {
590   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
591       : BaseType(IRP, A) {}
592 
593   /// See AbstractAttribute::updateImpl(...).
594   ChangeStatus updateImpl(Attributor &A) override {
595     StateType S = StateType::getBestState(this->getState());
596 
597     if (BridgeCallBaseContext) {
598       bool Success =
599           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
600               A, *this, this->getIRPosition(), S);
601       if (Success)
602         return clampStateAndIndicateChange<StateType>(this->getState(), S);
603     }
604     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
605 
606     // TODO: If we know we visited all incoming values, thus no are assumed
607     // dead, we can take the known information from the state T.
608     return clampStateAndIndicateChange<StateType>(this->getState(), S);
609   }
610 };
611 
612 /// Helper class for generic replication: function returned -> cs returned.
613 template <typename AAType, typename BaseType,
614           typename StateType = typename BaseType::StateType,
615           bool IntroduceCallBaseContext = false>
616 struct AACallSiteReturnedFromReturned : public BaseType {
617   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
618       : BaseType(IRP, A) {}
619 
620   /// See AbstractAttribute::updateImpl(...).
621   ChangeStatus updateImpl(Attributor &A) override {
622     assert(this->getIRPosition().getPositionKind() ==
623                IRPosition::IRP_CALL_SITE_RETURNED &&
624            "Can only wrap function returned positions for call site returned "
625            "positions!");
626     auto &S = this->getState();
627 
628     const Function *AssociatedFunction =
629         this->getIRPosition().getAssociatedFunction();
630     if (!AssociatedFunction)
631       return S.indicatePessimisticFixpoint();
632 
633     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
634     if (IntroduceCallBaseContext)
635       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
636                         << CBContext << "\n");
637 
638     IRPosition FnPos = IRPosition::returned(
639         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
640     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
641     return clampStateAndIndicateChange(S, AA.getState());
642   }
643 };
644 
645 /// Helper function to accumulate uses.
646 template <class AAType, typename StateType = typename AAType::StateType>
647 static void followUsesInContext(AAType &AA, Attributor &A,
648                                 MustBeExecutedContextExplorer &Explorer,
649                                 const Instruction *CtxI,
650                                 SetVector<const Use *> &Uses,
651                                 StateType &State) {
652   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
653   for (unsigned u = 0; u < Uses.size(); ++u) {
654     const Use *U = Uses[u];
655     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
656       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
657       if (Found && AA.followUseInMBEC(A, U, UserI, State))
658         for (const Use &Us : UserI->uses())
659           Uses.insert(&Us);
660     }
661   }
662 }
663 
664 /// Use the must-be-executed-context around \p I to add information into \p S.
665 /// The AAType class is required to have `followUseInMBEC` method with the
666 /// following signature and behaviour:
667 ///
668 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
669 /// U - Underlying use.
670 /// I - The user of the \p U.
671 /// Returns true if the value should be tracked transitively.
672 ///
673 template <class AAType, typename StateType = typename AAType::StateType>
674 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
675                              Instruction &CtxI) {
676 
677   // Container for (transitive) uses of the associated value.
678   SetVector<const Use *> Uses;
679   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
680     Uses.insert(&U);
681 
682   MustBeExecutedContextExplorer &Explorer =
683       A.getInfoCache().getMustBeExecutedContextExplorer();
684 
685   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
686 
687   if (S.isAtFixpoint())
688     return;
689 
690   SmallVector<const BranchInst *, 4> BrInsts;
691   auto Pred = [&](const Instruction *I) {
692     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
693       if (Br->isConditional())
694         BrInsts.push_back(Br);
695     return true;
696   };
697 
698   // Here, accumulate conditional branch instructions in the context. We
699   // explore the child paths and collect the known states. The disjunction of
700   // those states can be merged to its own state. Let ParentState_i be a state
701   // to indicate the known information for an i-th branch instruction in the
702   // context. ChildStates are created for its successors respectively.
703   //
704   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
705   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
706   //      ...
707   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
708   //
709   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
710   //
711   // FIXME: Currently, recursive branches are not handled. For example, we
712   // can't deduce that ptr must be dereferenced in below function.
713   //
714   // void f(int a, int c, int *ptr) {
715   //    if(a)
716   //      if (b) {
717   //        *ptr = 0;
718   //      } else {
719   //        *ptr = 1;
720   //      }
721   //    else {
722   //      if (b) {
723   //        *ptr = 0;
724   //      } else {
725   //        *ptr = 1;
726   //      }
727   //    }
728   // }
729 
730   Explorer.checkForAllContext(&CtxI, Pred);
731   for (const BranchInst *Br : BrInsts) {
732     StateType ParentState;
733 
734     // The known state of the parent state is a conjunction of children's
735     // known states so it is initialized with a best state.
736     ParentState.indicateOptimisticFixpoint();
737 
738     for (const BasicBlock *BB : Br->successors()) {
739       StateType ChildState;
740 
741       size_t BeforeSize = Uses.size();
742       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
743 
744       // Erase uses which only appear in the child.
745       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
746         It = Uses.erase(It);
747 
748       ParentState &= ChildState;
749     }
750 
751     // Use only known state.
752     S += ParentState;
753   }
754 }
755 
756 /// ------------------------ PointerInfo ---------------------------------------
757 
758 namespace llvm {
759 namespace AA {
760 namespace PointerInfo {
761 
762 /// An access kind description as used by AAPointerInfo.
763 struct OffsetAndSize;
764 
765 struct State;
766 
767 } // namespace PointerInfo
768 } // namespace AA
769 
770 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage.
771 template <>
772 struct DenseMapInfo<AAPointerInfo::Access> : DenseMapInfo<Instruction *> {
773   using Access = AAPointerInfo::Access;
774   static inline Access getEmptyKey();
775   static inline Access getTombstoneKey();
776   static unsigned getHashValue(const Access &A);
777   static bool isEqual(const Access &LHS, const Access &RHS);
778 };
779 
780 /// Helper that allows OffsetAndSize as a key in a DenseMap.
781 template <>
782 struct DenseMapInfo<AA::PointerInfo ::OffsetAndSize>
783     : DenseMapInfo<std::pair<int64_t, int64_t>> {};
784 
785 /// Helper for AA::PointerInfo::Acccess DenseMap/Set usage ignoring everythign
786 /// but the instruction
787 struct AccessAsInstructionInfo : DenseMapInfo<Instruction *> {
788   using Base = DenseMapInfo<Instruction *>;
789   using Access = AAPointerInfo::Access;
790   static inline Access getEmptyKey();
791   static inline Access getTombstoneKey();
792   static unsigned getHashValue(const Access &A);
793   static bool isEqual(const Access &LHS, const Access &RHS);
794 };
795 
796 } // namespace llvm
797 
798 /// Helper to represent an access offset and size, with logic to deal with
799 /// uncertainty and check for overlapping accesses.
800 struct AA::PointerInfo::OffsetAndSize : public std::pair<int64_t, int64_t> {
801   using BaseTy = std::pair<int64_t, int64_t>;
802   OffsetAndSize(int64_t Offset, int64_t Size) : BaseTy(Offset, Size) {}
803   OffsetAndSize(const BaseTy &P) : BaseTy(P) {}
804   int64_t getOffset() const { return first; }
805   int64_t getSize() const { return second; }
806   static OffsetAndSize getUnknown() { return OffsetAndSize(Unknown, Unknown); }
807 
808   /// Return true if this offset and size pair might describe an address that
809   /// overlaps with \p OAS.
810   bool mayOverlap(const OffsetAndSize &OAS) const {
811     // Any unknown value and we are giving up -> overlap.
812     if (OAS.getOffset() == OffsetAndSize::Unknown ||
813         OAS.getSize() == OffsetAndSize::Unknown ||
814         getOffset() == OffsetAndSize::Unknown ||
815         getSize() == OffsetAndSize::Unknown)
816       return true;
817 
818     // Check if one offset point is in the other interval [offset, offset+size].
819     return OAS.getOffset() + OAS.getSize() > getOffset() &&
820            OAS.getOffset() < getOffset() + getSize();
821   }
822 
823   /// Constant used to represent unknown offset or sizes.
824   static constexpr int64_t Unknown = 1 << 31;
825 };
826 
827 /// Implementation of the DenseMapInfo.
828 ///
829 ///{
830 inline llvm::AccessAsInstructionInfo::Access
831 llvm::AccessAsInstructionInfo::getEmptyKey() {
832   return Access(Base::getEmptyKey(), nullptr, AAPointerInfo::AK_READ, nullptr);
833 }
834 inline llvm::AccessAsInstructionInfo::Access
835 llvm::AccessAsInstructionInfo::getTombstoneKey() {
836   return Access(Base::getTombstoneKey(), nullptr, AAPointerInfo::AK_READ,
837                 nullptr);
838 }
839 unsigned llvm::AccessAsInstructionInfo::getHashValue(
840     const llvm::AccessAsInstructionInfo::Access &A) {
841   return Base::getHashValue(A.getRemoteInst());
842 }
843 bool llvm::AccessAsInstructionInfo::isEqual(
844     const llvm::AccessAsInstructionInfo::Access &LHS,
845     const llvm::AccessAsInstructionInfo::Access &RHS) {
846   return LHS.getRemoteInst() == RHS.getRemoteInst();
847 }
848 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
849 llvm::DenseMapInfo<AAPointerInfo::Access>::getEmptyKey() {
850   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_READ,
851                                nullptr);
852 }
853 inline llvm::DenseMapInfo<AAPointerInfo::Access>::Access
854 llvm::DenseMapInfo<AAPointerInfo::Access>::getTombstoneKey() {
855   return AAPointerInfo::Access(nullptr, nullptr, AAPointerInfo::AK_WRITE,
856                                nullptr);
857 }
858 
859 unsigned llvm::DenseMapInfo<AAPointerInfo::Access>::getHashValue(
860     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &A) {
861   return detail::combineHashValue(
862              DenseMapInfo<Instruction *>::getHashValue(A.getRemoteInst()),
863              (A.isWrittenValueYetUndetermined()
864                   ? ~0
865                   : DenseMapInfo<Value *>::getHashValue(A.getWrittenValue()))) +
866          A.getKind();
867 }
868 
869 bool llvm::DenseMapInfo<AAPointerInfo::Access>::isEqual(
870     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &LHS,
871     const llvm::DenseMapInfo<AAPointerInfo::Access>::Access &RHS) {
872   return LHS == RHS;
873 }
874 ///}
875 
876 /// A type to track pointer/struct usage and accesses for AAPointerInfo.
877 struct AA::PointerInfo::State : public AbstractState {
878 
879   /// Return the best possible representable state.
880   static State getBestState(const State &SIS) { return State(); }
881 
882   /// Return the worst possible representable state.
883   static State getWorstState(const State &SIS) {
884     State R;
885     R.indicatePessimisticFixpoint();
886     return R;
887   }
888 
889   State() {}
890   State(const State &SIS) : AccessBins(SIS.AccessBins) {}
891   State(State &&SIS) : AccessBins(std::move(SIS.AccessBins)) {}
892 
893   const State &getAssumed() const { return *this; }
894 
895   /// See AbstractState::isValidState().
896   bool isValidState() const override { return BS.isValidState(); }
897 
898   /// See AbstractState::isAtFixpoint().
899   bool isAtFixpoint() const override { return BS.isAtFixpoint(); }
900 
901   /// See AbstractState::indicateOptimisticFixpoint().
902   ChangeStatus indicateOptimisticFixpoint() override {
903     BS.indicateOptimisticFixpoint();
904     return ChangeStatus::UNCHANGED;
905   }
906 
907   /// See AbstractState::indicatePessimisticFixpoint().
908   ChangeStatus indicatePessimisticFixpoint() override {
909     BS.indicatePessimisticFixpoint();
910     return ChangeStatus::CHANGED;
911   }
912 
913   State &operator=(const State &R) {
914     if (this == &R)
915       return *this;
916     BS = R.BS;
917     AccessBins = R.AccessBins;
918     return *this;
919   }
920 
921   State &operator=(State &&R) {
922     if (this == &R)
923       return *this;
924     std::swap(BS, R.BS);
925     std::swap(AccessBins, R.AccessBins);
926     return *this;
927   }
928 
929   bool operator==(const State &R) const {
930     if (BS != R.BS)
931       return false;
932     if (AccessBins.size() != R.AccessBins.size())
933       return false;
934     auto It = begin(), RIt = R.begin(), E = end();
935     while (It != E) {
936       if (It->getFirst() != RIt->getFirst())
937         return false;
938       auto &Accs = It->getSecond();
939       auto &RAccs = RIt->getSecond();
940       if (Accs.size() != RAccs.size())
941         return false;
942       auto AccIt = Accs.begin(), RAccIt = RAccs.begin(), AccE = Accs.end();
943       while (AccIt != AccE) {
944         if (*AccIt != *RAccIt)
945           return false;
946         ++AccIt;
947         ++RAccIt;
948       }
949       ++It;
950       ++RIt;
951     }
952     return true;
953   }
954   bool operator!=(const State &R) const { return !(*this == R); }
955 
956   /// We store accesses in a set with the instruction as key.
957   using Accesses = DenseSet<AAPointerInfo::Access, AccessAsInstructionInfo>;
958 
959   /// We store all accesses in bins denoted by their offset and size.
960   using AccessBinsTy = DenseMap<OffsetAndSize, Accesses>;
961 
962   AccessBinsTy::const_iterator begin() const { return AccessBins.begin(); }
963   AccessBinsTy::const_iterator end() const { return AccessBins.end(); }
964 
965 protected:
966   /// The bins with all the accesses for the associated pointer.
967   DenseMap<OffsetAndSize, Accesses> AccessBins;
968 
969   /// Add a new access to the state at offset \p Offset and with size \p Size.
970   /// The access is associated with \p I, writes \p Content (if anything), and
971   /// is of kind \p Kind.
972   /// \Returns CHANGED, if the state changed, UNCHANGED otherwise.
973   ChangeStatus addAccess(int64_t Offset, int64_t Size, Instruction &I,
974                          Optional<Value *> Content,
975                          AAPointerInfo::AccessKind Kind, Type *Ty,
976                          Instruction *RemoteI = nullptr,
977                          Accesses *BinPtr = nullptr) {
978     OffsetAndSize Key{Offset, Size};
979     Accesses &Bin = BinPtr ? *BinPtr : AccessBins[Key];
980     AAPointerInfo::Access Acc(&I, RemoteI ? RemoteI : &I, Content, Kind, Ty);
981     // Check if we have an access for this instruction in this bin, if not,
982     // simply add it.
983     auto It = Bin.find(Acc);
984     if (It == Bin.end()) {
985       Bin.insert(Acc);
986       return ChangeStatus::CHANGED;
987     }
988     // If the existing access is the same as then new one, nothing changed.
989     AAPointerInfo::Access Before = *It;
990     // The new one will be combined with the existing one.
991     *It &= Acc;
992     return *It == Before ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
993   }
994 
995   /// See AAPointerInfo::forallInterferingAccesses.
996   bool forallInterferingAccesses(
997       Instruction &I,
998       function_ref<bool(const AAPointerInfo::Access &, bool)> CB) const {
999     if (!isValidState())
1000       return false;
1001     // First find the offset and size of I.
1002     OffsetAndSize OAS(-1, -1);
1003     for (auto &It : AccessBins) {
1004       for (auto &Access : It.getSecond()) {
1005         if (Access.getRemoteInst() == &I) {
1006           OAS = It.getFirst();
1007           break;
1008         }
1009       }
1010       if (OAS.getSize() != -1)
1011         break;
1012     }
1013     if (OAS.getSize() == -1)
1014       return true;
1015 
1016     // Now that we have an offset and size, find all overlapping ones and use
1017     // the callback on the accesses.
1018     for (auto &It : AccessBins) {
1019       OffsetAndSize ItOAS = It.getFirst();
1020       if (!OAS.mayOverlap(ItOAS))
1021         continue;
1022       for (auto &Access : It.getSecond())
1023         if (!CB(Access, OAS == ItOAS))
1024           return false;
1025     }
1026     return true;
1027   }
1028 
1029 private:
1030   /// State to track fixpoint and validity.
1031   BooleanState BS;
1032 };
1033 
1034 struct AAPointerInfoImpl
1035     : public StateWrapper<AA::PointerInfo::State, AAPointerInfo> {
1036   using BaseTy = StateWrapper<AA::PointerInfo::State, AAPointerInfo>;
1037   AAPointerInfoImpl(const IRPosition &IRP, Attributor &A) : BaseTy(IRP) {}
1038 
1039   /// See AbstractAttribute::initialize(...).
1040   void initialize(Attributor &A) override { AAPointerInfo::initialize(A); }
1041 
1042   /// See AbstractAttribute::getAsStr().
1043   const std::string getAsStr() const override {
1044     return std::string("PointerInfo ") +
1045            (isValidState() ? (std::string("#") +
1046                               std::to_string(AccessBins.size()) + " bins")
1047                            : "<invalid>");
1048   }
1049 
1050   /// See AbstractAttribute::manifest(...).
1051   ChangeStatus manifest(Attributor &A) override {
1052     return AAPointerInfo::manifest(A);
1053   }
1054 
1055   bool forallInterferingAccesses(
1056       LoadInst &LI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1057       const override {
1058     return State::forallInterferingAccesses(LI, CB);
1059   }
1060   bool forallInterferingAccesses(
1061       StoreInst &SI, function_ref<bool(const AAPointerInfo::Access &, bool)> CB)
1062       const override {
1063     return State::forallInterferingAccesses(SI, CB);
1064   }
1065 
1066   ChangeStatus translateAndAddCalleeState(Attributor &A,
1067                                           const AAPointerInfo &CalleeAA,
1068                                           int64_t CallArgOffset, CallBase &CB) {
1069     using namespace AA::PointerInfo;
1070     if (!CalleeAA.getState().isValidState() || !isValidState())
1071       return indicatePessimisticFixpoint();
1072 
1073     const auto &CalleeImplAA = static_cast<const AAPointerInfoImpl &>(CalleeAA);
1074     bool IsByval = CalleeImplAA.getAssociatedArgument()->hasByValAttr();
1075 
1076     // Combine the accesses bin by bin.
1077     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1078     for (auto &It : CalleeImplAA.getState()) {
1079       OffsetAndSize OAS = OffsetAndSize::getUnknown();
1080       if (CallArgOffset != OffsetAndSize::Unknown)
1081         OAS = OffsetAndSize(It.first.getOffset() + CallArgOffset,
1082                             It.first.getSize());
1083       Accesses &Bin = AccessBins[OAS];
1084       for (const AAPointerInfo::Access &RAcc : It.second) {
1085         if (IsByval && !RAcc.isRead())
1086           continue;
1087         bool UsedAssumedInformation = false;
1088         Optional<Value *> Content = A.translateArgumentToCallSiteContent(
1089             RAcc.getContent(), CB, *this, UsedAssumedInformation);
1090         AccessKind AK =
1091             AccessKind(RAcc.getKind() & (IsByval ? AccessKind::AK_READ
1092                                                  : AccessKind::AK_READ_WRITE));
1093         Changed =
1094             Changed | addAccess(OAS.getOffset(), OAS.getSize(), CB, Content, AK,
1095                                 RAcc.getType(), RAcc.getRemoteInst(), &Bin);
1096       }
1097     }
1098     return Changed;
1099   }
1100 
1101   /// Statistic tracking for all AAPointerInfo implementations.
1102   /// See AbstractAttribute::trackStatistics().
1103   void trackPointerInfoStatistics(const IRPosition &IRP) const {}
1104 };
1105 
1106 struct AAPointerInfoFloating : public AAPointerInfoImpl {
1107   using AccessKind = AAPointerInfo::AccessKind;
1108   AAPointerInfoFloating(const IRPosition &IRP, Attributor &A)
1109       : AAPointerInfoImpl(IRP, A) {}
1110 
1111   /// See AbstractAttribute::initialize(...).
1112   void initialize(Attributor &A) override { AAPointerInfoImpl::initialize(A); }
1113 
1114   /// Deal with an access and signal if it was handled successfully.
1115   bool handleAccess(Attributor &A, Instruction &I, Value &Ptr,
1116                     Optional<Value *> Content, AccessKind Kind, int64_t Offset,
1117                     ChangeStatus &Changed, Type *Ty,
1118                     int64_t Size = AA::PointerInfo::OffsetAndSize::Unknown) {
1119     using namespace AA::PointerInfo;
1120     // No need to find a size if one is given or the offset is unknown.
1121     if (Offset != OffsetAndSize::Unknown && Size == OffsetAndSize::Unknown &&
1122         Ty) {
1123       const DataLayout &DL = A.getDataLayout();
1124       TypeSize AccessSize = DL.getTypeStoreSize(Ty);
1125       if (!AccessSize.isScalable())
1126         Size = AccessSize.getFixedSize();
1127     }
1128     Changed = Changed | addAccess(Offset, Size, I, Content, Kind, Ty);
1129     return true;
1130   };
1131 
1132   /// Helper struct, will support ranges eventually.
1133   struct OffsetInfo {
1134     int64_t Offset = AA::PointerInfo::OffsetAndSize::Unknown;
1135 
1136     bool operator==(const OffsetInfo &OI) const { return Offset == OI.Offset; }
1137   };
1138 
1139   /// See AbstractAttribute::updateImpl(...).
1140   ChangeStatus updateImpl(Attributor &A) override {
1141     using namespace AA::PointerInfo;
1142     State S = getState();
1143     ChangeStatus Changed = ChangeStatus::UNCHANGED;
1144     Value &AssociatedValue = getAssociatedValue();
1145 
1146     const DataLayout &DL = A.getDataLayout();
1147     DenseMap<Value *, OffsetInfo> OffsetInfoMap;
1148     OffsetInfoMap[&AssociatedValue] = OffsetInfo{0};
1149 
1150     auto HandlePassthroughUser = [&](Value *Usr, OffsetInfo &PtrOI,
1151                                      bool &Follow) {
1152       OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1153       UsrOI = PtrOI;
1154       Follow = true;
1155       return true;
1156     };
1157 
1158     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
1159       Value *CurPtr = U.get();
1160       User *Usr = U.getUser();
1161       LLVM_DEBUG(dbgs() << "[AAPointerInfo] Analyze " << *CurPtr << " in "
1162                         << *Usr << "\n");
1163 
1164       OffsetInfo &PtrOI = OffsetInfoMap[CurPtr];
1165 
1166       if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Usr)) {
1167         if (CE->isCast())
1168           return HandlePassthroughUser(Usr, PtrOI, Follow);
1169         if (CE->isCompare())
1170           return true;
1171         if (!CE->isGEPWithNoNotionalOverIndexing()) {
1172           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled constant user " << *CE
1173                             << "\n");
1174           return false;
1175         }
1176       }
1177       if (auto *GEP = dyn_cast<GEPOperator>(Usr)) {
1178         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1179         UsrOI = PtrOI;
1180 
1181         // TODO: Use range information.
1182         if (PtrOI.Offset == OffsetAndSize::Unknown ||
1183             !GEP->hasAllConstantIndices()) {
1184           UsrOI.Offset = OffsetAndSize::Unknown;
1185           Follow = true;
1186           return true;
1187         }
1188 
1189         SmallVector<Value *, 8> Indices;
1190         for (Use &Idx : llvm::make_range(GEP->idx_begin(), GEP->idx_end())) {
1191           if (auto *CIdx = dyn_cast<ConstantInt>(Idx)) {
1192             Indices.push_back(CIdx);
1193             continue;
1194           }
1195 
1196           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Non constant GEP index " << *GEP
1197                             << " : " << *Idx << "\n");
1198           return false;
1199         }
1200         UsrOI.Offset = PtrOI.Offset +
1201                        DL.getIndexedOffsetInType(
1202                            CurPtr->getType()->getPointerElementType(), Indices);
1203         Follow = true;
1204         return true;
1205       }
1206       if (isa<CastInst>(Usr) || isa<SelectInst>(Usr))
1207         return HandlePassthroughUser(Usr, PtrOI, Follow);
1208 
1209       // For PHIs we need to take care of the recurrence explicitly as the value
1210       // might change while we iterate through a loop. For now, we give up if
1211       // the PHI is not invariant.
1212       if (isa<PHINode>(Usr)) {
1213         // Check if the PHI is invariant (so far).
1214         OffsetInfo &UsrOI = OffsetInfoMap[Usr];
1215         if (UsrOI == PtrOI)
1216           return true;
1217 
1218         // Check if the PHI operand has already an unknown offset as we can't
1219         // improve on that anymore.
1220         if (PtrOI.Offset == OffsetAndSize::Unknown) {
1221           UsrOI = PtrOI;
1222           Follow = true;
1223           return true;
1224         }
1225 
1226         // Check if the PHI operand is not dependent on the PHI itself.
1227         // TODO: This is not great as we look at the pointer type. However, it
1228         // is unclear where the Offset size comes from with typeless pointers.
1229         APInt Offset(
1230             DL.getIndexSizeInBits(CurPtr->getType()->getPointerAddressSpace()),
1231             0);
1232         if (&AssociatedValue == CurPtr->stripAndAccumulateConstantOffsets(
1233                                     DL, Offset, /* AllowNonInbounds */ true)) {
1234           if (Offset != PtrOI.Offset) {
1235             LLVM_DEBUG(dbgs()
1236                        << "[AAPointerInfo] PHI operand pointer offset mismatch "
1237                        << *CurPtr << " in " << *Usr << "\n");
1238             return false;
1239           }
1240           return HandlePassthroughUser(Usr, PtrOI, Follow);
1241         }
1242 
1243         // TODO: Approximate in case we know the direction of the recurrence.
1244         LLVM_DEBUG(dbgs() << "[AAPointerInfo] PHI operand is too complex "
1245                           << *CurPtr << " in " << *Usr << "\n");
1246         UsrOI = PtrOI;
1247         UsrOI.Offset = OffsetAndSize::Unknown;
1248         Follow = true;
1249         return true;
1250       }
1251 
1252       if (auto *LoadI = dyn_cast<LoadInst>(Usr))
1253         return handleAccess(A, *LoadI, *CurPtr, /* Content */ nullptr,
1254                             AccessKind::AK_READ, PtrOI.Offset, Changed,
1255                             LoadI->getType());
1256       if (auto *StoreI = dyn_cast<StoreInst>(Usr)) {
1257         if (StoreI->getValueOperand() == CurPtr) {
1258           LLVM_DEBUG(dbgs() << "[AAPointerInfo] Escaping use in store "
1259                             << *StoreI << "\n");
1260           return false;
1261         }
1262         bool UsedAssumedInformation = false;
1263         Optional<Value *> Content = A.getAssumedSimplified(
1264             *StoreI->getValueOperand(), *this, UsedAssumedInformation);
1265         return handleAccess(A, *StoreI, *CurPtr, Content, AccessKind::AK_WRITE,
1266                             PtrOI.Offset, Changed,
1267                             StoreI->getValueOperand()->getType());
1268       }
1269       if (auto *CB = dyn_cast<CallBase>(Usr)) {
1270         if (CB->isLifetimeStartOrEnd())
1271           return true;
1272         if (CB->isArgOperand(&U)) {
1273           unsigned ArgNo = CB->getArgOperandNo(&U);
1274           const auto &CSArgPI = A.getAAFor<AAPointerInfo>(
1275               *this, IRPosition::callsite_argument(*CB, ArgNo),
1276               DepClassTy::REQUIRED);
1277           Changed = translateAndAddCalleeState(A, CSArgPI, PtrOI.Offset, *CB) |
1278                     Changed;
1279           return true;
1280         }
1281         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Call user not handled " << *CB
1282                           << "\n");
1283         // TODO: Allow some call uses
1284         return false;
1285       }
1286 
1287       LLVM_DEBUG(dbgs() << "[AAPointerInfo] User not handled " << *Usr << "\n");
1288       return false;
1289     };
1290     if (!A.checkForAllUses(UsePred, *this, AssociatedValue,
1291                            /* CheckBBLivenessOnly */ true))
1292       return indicatePessimisticFixpoint();
1293 
1294     LLVM_DEBUG({
1295       dbgs() << "Accesses by bin after update:\n";
1296       for (auto &It : AccessBins) {
1297         dbgs() << "[" << It.first.getOffset() << "-"
1298                << It.first.getOffset() + It.first.getSize()
1299                << "] : " << It.getSecond().size() << "\n";
1300         for (auto &Acc : It.getSecond()) {
1301           dbgs() << "     - " << Acc.getKind() << " - " << *Acc.getLocalInst()
1302                  << "\n";
1303           if (Acc.getLocalInst() != Acc.getRemoteInst())
1304             dbgs() << "     -->                         "
1305                    << *Acc.getRemoteInst() << "\n";
1306           if (!Acc.isWrittenValueYetUndetermined())
1307             dbgs() << "     - " << Acc.getWrittenValue() << "\n";
1308         }
1309       }
1310     });
1311 
1312     return Changed;
1313   }
1314 
1315   /// See AbstractAttribute::trackStatistics()
1316   void trackStatistics() const override {
1317     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1318   }
1319 };
1320 
1321 struct AAPointerInfoReturned final : AAPointerInfoImpl {
1322   AAPointerInfoReturned(const IRPosition &IRP, Attributor &A)
1323       : AAPointerInfoImpl(IRP, A) {}
1324 
1325   /// See AbstractAttribute::updateImpl(...).
1326   ChangeStatus updateImpl(Attributor &A) override {
1327     return indicatePessimisticFixpoint();
1328   }
1329 
1330   /// See AbstractAttribute::trackStatistics()
1331   void trackStatistics() const override {
1332     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1333   }
1334 };
1335 
1336 struct AAPointerInfoArgument final : AAPointerInfoFloating {
1337   AAPointerInfoArgument(const IRPosition &IRP, Attributor &A)
1338       : AAPointerInfoFloating(IRP, A) {}
1339 
1340   /// See AbstractAttribute::initialize(...).
1341   void initialize(Attributor &A) override {
1342     AAPointerInfoFloating::initialize(A);
1343     if (getAnchorScope()->isDeclaration())
1344       indicatePessimisticFixpoint();
1345   }
1346 
1347   /// See AbstractAttribute::trackStatistics()
1348   void trackStatistics() const override {
1349     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1350   }
1351 };
1352 
1353 struct AAPointerInfoCallSiteArgument final : AAPointerInfoFloating {
1354   AAPointerInfoCallSiteArgument(const IRPosition &IRP, Attributor &A)
1355       : AAPointerInfoFloating(IRP, A) {}
1356 
1357   /// See AbstractAttribute::updateImpl(...).
1358   ChangeStatus updateImpl(Attributor &A) override {
1359     using namespace AA::PointerInfo;
1360     // We handle memory intrinsics explicitly, at least the first (=
1361     // destination) and second (=source) arguments as we know how they are
1362     // accessed.
1363     if (auto *MI = dyn_cast_or_null<MemIntrinsic>(getCtxI())) {
1364       ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1365       int64_t LengthVal = OffsetAndSize::Unknown;
1366       if (Length)
1367         LengthVal = Length->getSExtValue();
1368       Value &Ptr = getAssociatedValue();
1369       unsigned ArgNo = getIRPosition().getCallSiteArgNo();
1370       ChangeStatus Changed;
1371       if (ArgNo == 0) {
1372         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_WRITE, 0, Changed,
1373                      nullptr, LengthVal);
1374       } else if (ArgNo == 1) {
1375         handleAccess(A, *MI, Ptr, nullptr, AccessKind::AK_READ, 0, Changed,
1376                      nullptr, LengthVal);
1377       } else {
1378         LLVM_DEBUG(dbgs() << "[AAPointerInfo] Unhandled memory intrinsic "
1379                           << *MI << "\n");
1380         return indicatePessimisticFixpoint();
1381       }
1382       return Changed;
1383     }
1384 
1385     // TODO: Once we have call site specific value information we can provide
1386     //       call site specific liveness information and then it makes
1387     //       sense to specialize attributes for call sites arguments instead of
1388     //       redirecting requests to the callee argument.
1389     Argument *Arg = getAssociatedArgument();
1390     if (!Arg)
1391       return indicatePessimisticFixpoint();
1392     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1393     auto &ArgAA =
1394         A.getAAFor<AAPointerInfo>(*this, ArgPos, DepClassTy::REQUIRED);
1395     return translateAndAddCalleeState(A, ArgAA, 0, *cast<CallBase>(getCtxI()));
1396   }
1397 
1398   /// See AbstractAttribute::trackStatistics()
1399   void trackStatistics() const override {
1400     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1401   }
1402 };
1403 
1404 struct AAPointerInfoCallSiteReturned final : AAPointerInfoFloating {
1405   AAPointerInfoCallSiteReturned(const IRPosition &IRP, Attributor &A)
1406       : AAPointerInfoFloating(IRP, A) {}
1407 
1408   /// See AbstractAttribute::trackStatistics()
1409   void trackStatistics() const override {
1410     AAPointerInfoImpl::trackPointerInfoStatistics(getIRPosition());
1411   }
1412 };
1413 
1414 /// -----------------------NoUnwind Function Attribute--------------------------
1415 
1416 struct AANoUnwindImpl : AANoUnwind {
1417   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
1418 
1419   const std::string getAsStr() const override {
1420     return getAssumed() ? "nounwind" : "may-unwind";
1421   }
1422 
1423   /// See AbstractAttribute::updateImpl(...).
1424   ChangeStatus updateImpl(Attributor &A) override {
1425     auto Opcodes = {
1426         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
1427         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
1428         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
1429 
1430     auto CheckForNoUnwind = [&](Instruction &I) {
1431       if (!I.mayThrow())
1432         return true;
1433 
1434       if (const auto *CB = dyn_cast<CallBase>(&I)) {
1435         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
1436             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1437         return NoUnwindAA.isAssumedNoUnwind();
1438       }
1439       return false;
1440     };
1441 
1442     bool UsedAssumedInformation = false;
1443     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes,
1444                                    UsedAssumedInformation))
1445       return indicatePessimisticFixpoint();
1446 
1447     return ChangeStatus::UNCHANGED;
1448   }
1449 };
1450 
1451 struct AANoUnwindFunction final : public AANoUnwindImpl {
1452   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
1453       : AANoUnwindImpl(IRP, A) {}
1454 
1455   /// See AbstractAttribute::trackStatistics()
1456   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
1457 };
1458 
1459 /// NoUnwind attribute deduction for a call sites.
1460 struct AANoUnwindCallSite final : AANoUnwindImpl {
1461   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
1462       : AANoUnwindImpl(IRP, A) {}
1463 
1464   /// See AbstractAttribute::initialize(...).
1465   void initialize(Attributor &A) override {
1466     AANoUnwindImpl::initialize(A);
1467     Function *F = getAssociatedFunction();
1468     if (!F || F->isDeclaration())
1469       indicatePessimisticFixpoint();
1470   }
1471 
1472   /// See AbstractAttribute::updateImpl(...).
1473   ChangeStatus updateImpl(Attributor &A) override {
1474     // TODO: Once we have call site specific value information we can provide
1475     //       call site specific liveness information and then it makes
1476     //       sense to specialize attributes for call sites arguments instead of
1477     //       redirecting requests to the callee argument.
1478     Function *F = getAssociatedFunction();
1479     const IRPosition &FnPos = IRPosition::function(*F);
1480     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
1481     return clampStateAndIndicateChange(getState(), FnAA.getState());
1482   }
1483 
1484   /// See AbstractAttribute::trackStatistics()
1485   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
1486 };
1487 
1488 /// --------------------- Function Return Values -------------------------------
1489 
1490 /// "Attribute" that collects all potential returned values and the return
1491 /// instructions that they arise from.
1492 ///
1493 /// If there is a unique returned value R, the manifest method will:
1494 ///   - mark R with the "returned" attribute, if R is an argument.
1495 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
1496 
1497   /// Mapping of values potentially returned by the associated function to the
1498   /// return instructions that might return them.
1499   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
1500 
1501   /// State flags
1502   ///
1503   ///{
1504   bool IsFixed = false;
1505   bool IsValidState = true;
1506   ///}
1507 
1508 public:
1509   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
1510       : AAReturnedValues(IRP, A) {}
1511 
1512   /// See AbstractAttribute::initialize(...).
1513   void initialize(Attributor &A) override {
1514     // Reset the state.
1515     IsFixed = false;
1516     IsValidState = true;
1517     ReturnedValues.clear();
1518 
1519     Function *F = getAssociatedFunction();
1520     if (!F || F->isDeclaration()) {
1521       indicatePessimisticFixpoint();
1522       return;
1523     }
1524     assert(!F->getReturnType()->isVoidTy() &&
1525            "Did not expect a void return type!");
1526 
1527     // The map from instruction opcodes to those instructions in the function.
1528     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
1529 
1530     // Look through all arguments, if one is marked as returned we are done.
1531     for (Argument &Arg : F->args()) {
1532       if (Arg.hasReturnedAttr()) {
1533         auto &ReturnInstSet = ReturnedValues[&Arg];
1534         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
1535           for (Instruction *RI : *Insts)
1536             ReturnInstSet.insert(cast<ReturnInst>(RI));
1537 
1538         indicateOptimisticFixpoint();
1539         return;
1540       }
1541     }
1542 
1543     if (!A.isFunctionIPOAmendable(*F))
1544       indicatePessimisticFixpoint();
1545   }
1546 
1547   /// See AbstractAttribute::manifest(...).
1548   ChangeStatus manifest(Attributor &A) override;
1549 
1550   /// See AbstractAttribute::getState(...).
1551   AbstractState &getState() override { return *this; }
1552 
1553   /// See AbstractAttribute::getState(...).
1554   const AbstractState &getState() const override { return *this; }
1555 
1556   /// See AbstractAttribute::updateImpl(Attributor &A).
1557   ChangeStatus updateImpl(Attributor &A) override;
1558 
1559   llvm::iterator_range<iterator> returned_values() override {
1560     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1561   }
1562 
1563   llvm::iterator_range<const_iterator> returned_values() const override {
1564     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
1565   }
1566 
1567   /// Return the number of potential return values, -1 if unknown.
1568   size_t getNumReturnValues() const override {
1569     return isValidState() ? ReturnedValues.size() : -1;
1570   }
1571 
1572   /// Return an assumed unique return value if a single candidate is found. If
1573   /// there cannot be one, return a nullptr. If it is not clear yet, return the
1574   /// Optional::NoneType.
1575   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
1576 
1577   /// See AbstractState::checkForAllReturnedValues(...).
1578   bool checkForAllReturnedValuesAndReturnInsts(
1579       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1580       const override;
1581 
1582   /// Pretty print the attribute similar to the IR representation.
1583   const std::string getAsStr() const override;
1584 
1585   /// See AbstractState::isAtFixpoint().
1586   bool isAtFixpoint() const override { return IsFixed; }
1587 
1588   /// See AbstractState::isValidState().
1589   bool isValidState() const override { return IsValidState; }
1590 
1591   /// See AbstractState::indicateOptimisticFixpoint(...).
1592   ChangeStatus indicateOptimisticFixpoint() override {
1593     IsFixed = true;
1594     return ChangeStatus::UNCHANGED;
1595   }
1596 
1597   ChangeStatus indicatePessimisticFixpoint() override {
1598     IsFixed = true;
1599     IsValidState = false;
1600     return ChangeStatus::CHANGED;
1601   }
1602 };
1603 
1604 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
1605   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1606 
1607   // Bookkeeping.
1608   assert(isValidState());
1609   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
1610                   "Number of function with known return values");
1611 
1612   // Check if we have an assumed unique return value that we could manifest.
1613   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
1614 
1615   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
1616     return Changed;
1617 
1618   // Bookkeeping.
1619   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
1620                   "Number of function with unique return");
1621   // If the assumed unique return value is an argument, annotate it.
1622   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
1623     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
1624             getAssociatedFunction()->getReturnType())) {
1625       getIRPosition() = IRPosition::argument(*UniqueRVArg);
1626       Changed = IRAttribute::manifest(A);
1627     }
1628   }
1629   return Changed;
1630 }
1631 
1632 const std::string AAReturnedValuesImpl::getAsStr() const {
1633   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1634          (isValidState() ? std::to_string(getNumReturnValues()) : "?") + ")";
1635 }
1636 
1637 Optional<Value *>
1638 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1639   // If checkForAllReturnedValues provides a unique value, ignoring potential
1640   // undef values that can also be present, it is assumed to be the actual
1641   // return value and forwarded to the caller of this method. If there are
1642   // multiple, a nullptr is returned indicating there cannot be a unique
1643   // returned value.
1644   Optional<Value *> UniqueRV;
1645   Type *Ty = getAssociatedFunction()->getReturnType();
1646 
1647   auto Pred = [&](Value &RV) -> bool {
1648     UniqueRV = AA::combineOptionalValuesInAAValueLatice(UniqueRV, &RV, Ty);
1649     return UniqueRV != Optional<Value *>(nullptr);
1650   };
1651 
1652   if (!A.checkForAllReturnedValues(Pred, *this))
1653     UniqueRV = nullptr;
1654 
1655   return UniqueRV;
1656 }
1657 
1658 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1659     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1660     const {
1661   if (!isValidState())
1662     return false;
1663 
1664   // Check all returned values but ignore call sites as long as we have not
1665   // encountered an overdefined one during an update.
1666   for (auto &It : ReturnedValues) {
1667     Value *RV = It.first;
1668     if (!Pred(*RV, It.second))
1669       return false;
1670   }
1671 
1672   return true;
1673 }
1674 
1675 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1676   ChangeStatus Changed = ChangeStatus::UNCHANGED;
1677 
1678   auto ReturnValueCB = [&](Value &V, const Instruction *CtxI, ReturnInst &Ret,
1679                            bool) -> bool {
1680     bool UsedAssumedInformation = false;
1681     Optional<Value *> SimpleRetVal =
1682         A.getAssumedSimplified(V, *this, UsedAssumedInformation);
1683     if (!SimpleRetVal.hasValue())
1684       return true;
1685     if (!SimpleRetVal.getValue())
1686       return false;
1687     Value *RetVal = *SimpleRetVal;
1688     assert(AA::isValidInScope(*RetVal, Ret.getFunction()) &&
1689            "Assumed returned value should be valid in function scope!");
1690     if (ReturnedValues[RetVal].insert(&Ret))
1691       Changed = ChangeStatus::CHANGED;
1692     return true;
1693   };
1694 
1695   auto ReturnInstCB = [&](Instruction &I) {
1696     ReturnInst &Ret = cast<ReturnInst>(I);
1697     return genericValueTraversal<ReturnInst>(
1698         A, IRPosition::value(*Ret.getReturnValue()), *this, Ret, ReturnValueCB,
1699         &I);
1700   };
1701 
1702   // Discover returned values from all live returned instructions in the
1703   // associated function.
1704   bool UsedAssumedInformation = false;
1705   if (!A.checkForAllInstructions(ReturnInstCB, *this, {Instruction::Ret},
1706                                  UsedAssumedInformation))
1707     return indicatePessimisticFixpoint();
1708   return Changed;
1709 }
1710 
1711 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1712   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1713       : AAReturnedValuesImpl(IRP, A) {}
1714 
1715   /// See AbstractAttribute::trackStatistics()
1716   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1717 };
1718 
1719 /// Returned values information for a call sites.
1720 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1721   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1722       : AAReturnedValuesImpl(IRP, A) {}
1723 
1724   /// See AbstractAttribute::initialize(...).
1725   void initialize(Attributor &A) override {
1726     // TODO: Once we have call site specific value information we can provide
1727     //       call site specific liveness information and then it makes
1728     //       sense to specialize attributes for call sites instead of
1729     //       redirecting requests to the callee.
1730     llvm_unreachable("Abstract attributes for returned values are not "
1731                      "supported for call sites yet!");
1732   }
1733 
1734   /// See AbstractAttribute::updateImpl(...).
1735   ChangeStatus updateImpl(Attributor &A) override {
1736     return indicatePessimisticFixpoint();
1737   }
1738 
1739   /// See AbstractAttribute::trackStatistics()
1740   void trackStatistics() const override {}
1741 };
1742 
1743 /// ------------------------ NoSync Function Attribute -------------------------
1744 
1745 struct AANoSyncImpl : AANoSync {
1746   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1747 
1748   const std::string getAsStr() const override {
1749     return getAssumed() ? "nosync" : "may-sync";
1750   }
1751 
1752   /// See AbstractAttribute::updateImpl(...).
1753   ChangeStatus updateImpl(Attributor &A) override;
1754 
1755   /// Helper function used to determine whether an instruction is non-relaxed
1756   /// atomic. In other words, if an atomic instruction does not have unordered
1757   /// or monotonic ordering
1758   static bool isNonRelaxedAtomic(Instruction *I);
1759 
1760   /// Helper function specific for intrinsics which are potentially volatile
1761   static bool isNoSyncIntrinsic(Instruction *I);
1762 };
1763 
1764 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1765   if (!I->isAtomic())
1766     return false;
1767 
1768   if (auto *FI = dyn_cast<FenceInst>(I))
1769     // All legal orderings for fence are stronger than monotonic.
1770     return FI->getSyncScopeID() != SyncScope::SingleThread;
1771   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1772     // Unordered is not a legal ordering for cmpxchg.
1773     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1774             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1775   }
1776 
1777   AtomicOrdering Ordering;
1778   switch (I->getOpcode()) {
1779   case Instruction::AtomicRMW:
1780     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1781     break;
1782   case Instruction::Store:
1783     Ordering = cast<StoreInst>(I)->getOrdering();
1784     break;
1785   case Instruction::Load:
1786     Ordering = cast<LoadInst>(I)->getOrdering();
1787     break;
1788   default:
1789     llvm_unreachable(
1790         "New atomic operations need to be known in the attributor.");
1791   }
1792 
1793   return (Ordering != AtomicOrdering::Unordered &&
1794           Ordering != AtomicOrdering::Monotonic);
1795 }
1796 
1797 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1798 /// which would be nosync except that they have a volatile flag.  All other
1799 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1800 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1801   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1802     return !MI->isVolatile();
1803   return false;
1804 }
1805 
1806 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1807 
1808   auto CheckRWInstForNoSync = [&](Instruction &I) {
1809     /// We are looking for volatile instructions or Non-Relaxed atomics.
1810 
1811     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1812       if (CB->hasFnAttr(Attribute::NoSync))
1813         return true;
1814 
1815       if (isNoSyncIntrinsic(&I))
1816         return true;
1817 
1818       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1819           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1820       return NoSyncAA.isAssumedNoSync();
1821     }
1822 
1823     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1824       return true;
1825 
1826     return false;
1827   };
1828 
1829   auto CheckForNoSync = [&](Instruction &I) {
1830     // At this point we handled all read/write effects and they are all
1831     // nosync, so they can be skipped.
1832     if (I.mayReadOrWriteMemory())
1833       return true;
1834 
1835     // non-convergent and readnone imply nosync.
1836     return !cast<CallBase>(I).isConvergent();
1837   };
1838 
1839   bool UsedAssumedInformation = false;
1840   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this,
1841                                           UsedAssumedInformation) ||
1842       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this,
1843                                          UsedAssumedInformation))
1844     return indicatePessimisticFixpoint();
1845 
1846   return ChangeStatus::UNCHANGED;
1847 }
1848 
1849 struct AANoSyncFunction final : public AANoSyncImpl {
1850   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1851       : AANoSyncImpl(IRP, A) {}
1852 
1853   /// See AbstractAttribute::trackStatistics()
1854   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1855 };
1856 
1857 /// NoSync attribute deduction for a call sites.
1858 struct AANoSyncCallSite final : AANoSyncImpl {
1859   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1860       : AANoSyncImpl(IRP, A) {}
1861 
1862   /// See AbstractAttribute::initialize(...).
1863   void initialize(Attributor &A) override {
1864     AANoSyncImpl::initialize(A);
1865     Function *F = getAssociatedFunction();
1866     if (!F || F->isDeclaration())
1867       indicatePessimisticFixpoint();
1868   }
1869 
1870   /// See AbstractAttribute::updateImpl(...).
1871   ChangeStatus updateImpl(Attributor &A) override {
1872     // TODO: Once we have call site specific value information we can provide
1873     //       call site specific liveness information and then it makes
1874     //       sense to specialize attributes for call sites arguments instead of
1875     //       redirecting requests to the callee argument.
1876     Function *F = getAssociatedFunction();
1877     const IRPosition &FnPos = IRPosition::function(*F);
1878     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1879     return clampStateAndIndicateChange(getState(), FnAA.getState());
1880   }
1881 
1882   /// See AbstractAttribute::trackStatistics()
1883   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1884 };
1885 
1886 /// ------------------------ No-Free Attributes ----------------------------
1887 
1888 struct AANoFreeImpl : public AANoFree {
1889   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1890 
1891   /// See AbstractAttribute::updateImpl(...).
1892   ChangeStatus updateImpl(Attributor &A) override {
1893     auto CheckForNoFree = [&](Instruction &I) {
1894       const auto &CB = cast<CallBase>(I);
1895       if (CB.hasFnAttr(Attribute::NoFree))
1896         return true;
1897 
1898       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1899           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1900       return NoFreeAA.isAssumedNoFree();
1901     };
1902 
1903     bool UsedAssumedInformation = false;
1904     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this,
1905                                            UsedAssumedInformation))
1906       return indicatePessimisticFixpoint();
1907     return ChangeStatus::UNCHANGED;
1908   }
1909 
1910   /// See AbstractAttribute::getAsStr().
1911   const std::string getAsStr() const override {
1912     return getAssumed() ? "nofree" : "may-free";
1913   }
1914 };
1915 
1916 struct AANoFreeFunction final : public AANoFreeImpl {
1917   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1918       : AANoFreeImpl(IRP, A) {}
1919 
1920   /// See AbstractAttribute::trackStatistics()
1921   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1922 };
1923 
1924 /// NoFree attribute deduction for a call sites.
1925 struct AANoFreeCallSite final : AANoFreeImpl {
1926   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1927       : AANoFreeImpl(IRP, A) {}
1928 
1929   /// See AbstractAttribute::initialize(...).
1930   void initialize(Attributor &A) override {
1931     AANoFreeImpl::initialize(A);
1932     Function *F = getAssociatedFunction();
1933     if (!F || F->isDeclaration())
1934       indicatePessimisticFixpoint();
1935   }
1936 
1937   /// See AbstractAttribute::updateImpl(...).
1938   ChangeStatus updateImpl(Attributor &A) override {
1939     // TODO: Once we have call site specific value information we can provide
1940     //       call site specific liveness information and then it makes
1941     //       sense to specialize attributes for call sites arguments instead of
1942     //       redirecting requests to the callee argument.
1943     Function *F = getAssociatedFunction();
1944     const IRPosition &FnPos = IRPosition::function(*F);
1945     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1946     return clampStateAndIndicateChange(getState(), FnAA.getState());
1947   }
1948 
1949   /// See AbstractAttribute::trackStatistics()
1950   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1951 };
1952 
1953 /// NoFree attribute for floating values.
1954 struct AANoFreeFloating : AANoFreeImpl {
1955   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1956       : AANoFreeImpl(IRP, A) {}
1957 
1958   /// See AbstractAttribute::trackStatistics()
1959   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1960 
1961   /// See Abstract Attribute::updateImpl(...).
1962   ChangeStatus updateImpl(Attributor &A) override {
1963     const IRPosition &IRP = getIRPosition();
1964 
1965     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1966         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1967     if (NoFreeAA.isAssumedNoFree())
1968       return ChangeStatus::UNCHANGED;
1969 
1970     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1971     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1972       Instruction *UserI = cast<Instruction>(U.getUser());
1973       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1974         if (CB->isBundleOperand(&U))
1975           return false;
1976         if (!CB->isArgOperand(&U))
1977           return true;
1978         unsigned ArgNo = CB->getArgOperandNo(&U);
1979 
1980         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1981             *this, IRPosition::callsite_argument(*CB, ArgNo),
1982             DepClassTy::REQUIRED);
1983         return NoFreeArg.isAssumedNoFree();
1984       }
1985 
1986       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1987           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1988         Follow = true;
1989         return true;
1990       }
1991       if (isa<StoreInst>(UserI) || isa<LoadInst>(UserI) ||
1992           isa<ReturnInst>(UserI))
1993         return true;
1994 
1995       // Unknown user.
1996       return false;
1997     };
1998     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1999       return indicatePessimisticFixpoint();
2000 
2001     return ChangeStatus::UNCHANGED;
2002   }
2003 };
2004 
2005 /// NoFree attribute for a call site argument.
2006 struct AANoFreeArgument final : AANoFreeFloating {
2007   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
2008       : AANoFreeFloating(IRP, A) {}
2009 
2010   /// See AbstractAttribute::trackStatistics()
2011   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
2012 };
2013 
2014 /// NoFree attribute for call site arguments.
2015 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
2016   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
2017       : AANoFreeFloating(IRP, A) {}
2018 
2019   /// See AbstractAttribute::updateImpl(...).
2020   ChangeStatus updateImpl(Attributor &A) override {
2021     // TODO: Once we have call site specific value information we can provide
2022     //       call site specific liveness information and then it makes
2023     //       sense to specialize attributes for call sites arguments instead of
2024     //       redirecting requests to the callee argument.
2025     Argument *Arg = getAssociatedArgument();
2026     if (!Arg)
2027       return indicatePessimisticFixpoint();
2028     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2029     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
2030     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2031   }
2032 
2033   /// See AbstractAttribute::trackStatistics()
2034   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
2035 };
2036 
2037 /// NoFree attribute for function return value.
2038 struct AANoFreeReturned final : AANoFreeFloating {
2039   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
2040       : AANoFreeFloating(IRP, A) {
2041     llvm_unreachable("NoFree is not applicable to function returns!");
2042   }
2043 
2044   /// See AbstractAttribute::initialize(...).
2045   void initialize(Attributor &A) override {
2046     llvm_unreachable("NoFree is not applicable to function returns!");
2047   }
2048 
2049   /// See AbstractAttribute::updateImpl(...).
2050   ChangeStatus updateImpl(Attributor &A) override {
2051     llvm_unreachable("NoFree is not applicable to function returns!");
2052   }
2053 
2054   /// See AbstractAttribute::trackStatistics()
2055   void trackStatistics() const override {}
2056 };
2057 
2058 /// NoFree attribute deduction for a call site return value.
2059 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
2060   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
2061       : AANoFreeFloating(IRP, A) {}
2062 
2063   ChangeStatus manifest(Attributor &A) override {
2064     return ChangeStatus::UNCHANGED;
2065   }
2066   /// See AbstractAttribute::trackStatistics()
2067   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
2068 };
2069 
2070 /// ------------------------ NonNull Argument Attribute ------------------------
2071 static int64_t getKnownNonNullAndDerefBytesForUse(
2072     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
2073     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
2074   TrackUse = false;
2075 
2076   const Value *UseV = U->get();
2077   if (!UseV->getType()->isPointerTy())
2078     return 0;
2079 
2080   // We need to follow common pointer manipulation uses to the accesses they
2081   // feed into. We can try to be smart to avoid looking through things we do not
2082   // like for now, e.g., non-inbounds GEPs.
2083   if (isa<CastInst>(I)) {
2084     TrackUse = true;
2085     return 0;
2086   }
2087 
2088   if (isa<GetElementPtrInst>(I)) {
2089     TrackUse = true;
2090     return 0;
2091   }
2092 
2093   Type *PtrTy = UseV->getType();
2094   const Function *F = I->getFunction();
2095   bool NullPointerIsDefined =
2096       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
2097   const DataLayout &DL = A.getInfoCache().getDL();
2098   if (const auto *CB = dyn_cast<CallBase>(I)) {
2099     if (CB->isBundleOperand(U)) {
2100       if (RetainedKnowledge RK = getKnowledgeFromUse(
2101               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
2102         IsNonNull |=
2103             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
2104         return RK.ArgValue;
2105       }
2106       return 0;
2107     }
2108 
2109     if (CB->isCallee(U)) {
2110       IsNonNull |= !NullPointerIsDefined;
2111       return 0;
2112     }
2113 
2114     unsigned ArgNo = CB->getArgOperandNo(U);
2115     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
2116     // As long as we only use known information there is no need to track
2117     // dependences here.
2118     auto &DerefAA =
2119         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
2120     IsNonNull |= DerefAA.isKnownNonNull();
2121     return DerefAA.getKnownDereferenceableBytes();
2122   }
2123 
2124   int64_t Offset;
2125   const Value *Base =
2126       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
2127   if (Base) {
2128     if (Base == &AssociatedValue &&
2129         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2130       int64_t DerefBytes =
2131           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
2132 
2133       IsNonNull |= !NullPointerIsDefined;
2134       return std::max(int64_t(0), DerefBytes);
2135     }
2136   }
2137 
2138   /// Corner case when an offset is 0.
2139   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
2140                                               /*AllowNonInbounds*/ true);
2141   if (Base) {
2142     if (Offset == 0 && Base == &AssociatedValue &&
2143         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
2144       int64_t DerefBytes =
2145           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
2146       IsNonNull |= !NullPointerIsDefined;
2147       return std::max(int64_t(0), DerefBytes);
2148     }
2149   }
2150 
2151   return 0;
2152 }
2153 
2154 struct AANonNullImpl : AANonNull {
2155   AANonNullImpl(const IRPosition &IRP, Attributor &A)
2156       : AANonNull(IRP, A),
2157         NullIsDefined(NullPointerIsDefined(
2158             getAnchorScope(),
2159             getAssociatedValue().getType()->getPointerAddressSpace())) {}
2160 
2161   /// See AbstractAttribute::initialize(...).
2162   void initialize(Attributor &A) override {
2163     Value &V = getAssociatedValue();
2164     if (!NullIsDefined &&
2165         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
2166                 /* IgnoreSubsumingPositions */ false, &A)) {
2167       indicateOptimisticFixpoint();
2168       return;
2169     }
2170 
2171     if (isa<ConstantPointerNull>(V)) {
2172       indicatePessimisticFixpoint();
2173       return;
2174     }
2175 
2176     AANonNull::initialize(A);
2177 
2178     bool CanBeNull, CanBeFreed;
2179     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
2180                                          CanBeFreed)) {
2181       if (!CanBeNull) {
2182         indicateOptimisticFixpoint();
2183         return;
2184       }
2185     }
2186 
2187     if (isa<GlobalValue>(&getAssociatedValue())) {
2188       indicatePessimisticFixpoint();
2189       return;
2190     }
2191 
2192     if (Instruction *CtxI = getCtxI())
2193       followUsesInMBEC(*this, A, getState(), *CtxI);
2194   }
2195 
2196   /// See followUsesInMBEC
2197   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
2198                        AANonNull::StateType &State) {
2199     bool IsNonNull = false;
2200     bool TrackUse = false;
2201     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
2202                                        IsNonNull, TrackUse);
2203     State.setKnown(IsNonNull);
2204     return TrackUse;
2205   }
2206 
2207   /// See AbstractAttribute::getAsStr().
2208   const std::string getAsStr() const override {
2209     return getAssumed() ? "nonnull" : "may-null";
2210   }
2211 
2212   /// Flag to determine if the underlying value can be null and still allow
2213   /// valid accesses.
2214   const bool NullIsDefined;
2215 };
2216 
2217 /// NonNull attribute for a floating value.
2218 struct AANonNullFloating : public AANonNullImpl {
2219   AANonNullFloating(const IRPosition &IRP, Attributor &A)
2220       : AANonNullImpl(IRP, A) {}
2221 
2222   /// See AbstractAttribute::updateImpl(...).
2223   ChangeStatus updateImpl(Attributor &A) override {
2224     const DataLayout &DL = A.getDataLayout();
2225 
2226     DominatorTree *DT = nullptr;
2227     AssumptionCache *AC = nullptr;
2228     InformationCache &InfoCache = A.getInfoCache();
2229     if (const Function *Fn = getAnchorScope()) {
2230       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
2231       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
2232     }
2233 
2234     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
2235                             AANonNull::StateType &T, bool Stripped) -> bool {
2236       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
2237                                              DepClassTy::REQUIRED);
2238       if (!Stripped && this == &AA) {
2239         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
2240           T.indicatePessimisticFixpoint();
2241       } else {
2242         // Use abstract attribute information.
2243         const AANonNull::StateType &NS = AA.getState();
2244         T ^= NS;
2245       }
2246       return T.isValidState();
2247     };
2248 
2249     StateType T;
2250     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
2251                                           VisitValueCB, getCtxI()))
2252       return indicatePessimisticFixpoint();
2253 
2254     return clampStateAndIndicateChange(getState(), T);
2255   }
2256 
2257   /// See AbstractAttribute::trackStatistics()
2258   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2259 };
2260 
2261 /// NonNull attribute for function return value.
2262 struct AANonNullReturned final
2263     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
2264   AANonNullReturned(const IRPosition &IRP, Attributor &A)
2265       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
2266 
2267   /// See AbstractAttribute::getAsStr().
2268   const std::string getAsStr() const override {
2269     return getAssumed() ? "nonnull" : "may-null";
2270   }
2271 
2272   /// See AbstractAttribute::trackStatistics()
2273   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
2274 };
2275 
2276 /// NonNull attribute for function argument.
2277 struct AANonNullArgument final
2278     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
2279   AANonNullArgument(const IRPosition &IRP, Attributor &A)
2280       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
2281 
2282   /// See AbstractAttribute::trackStatistics()
2283   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
2284 };
2285 
2286 struct AANonNullCallSiteArgument final : AANonNullFloating {
2287   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
2288       : AANonNullFloating(IRP, A) {}
2289 
2290   /// See AbstractAttribute::trackStatistics()
2291   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
2292 };
2293 
2294 /// NonNull attribute for a call site return position.
2295 struct AANonNullCallSiteReturned final
2296     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
2297   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
2298       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
2299 
2300   /// See AbstractAttribute::trackStatistics()
2301   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
2302 };
2303 
2304 /// ------------------------ No-Recurse Attributes ----------------------------
2305 
2306 struct AANoRecurseImpl : public AANoRecurse {
2307   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
2308 
2309   /// See AbstractAttribute::getAsStr()
2310   const std::string getAsStr() const override {
2311     return getAssumed() ? "norecurse" : "may-recurse";
2312   }
2313 };
2314 
2315 struct AANoRecurseFunction final : AANoRecurseImpl {
2316   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
2317       : AANoRecurseImpl(IRP, A) {}
2318 
2319   /// See AbstractAttribute::initialize(...).
2320   void initialize(Attributor &A) override {
2321     AANoRecurseImpl::initialize(A);
2322     if (const Function *F = getAnchorScope())
2323       if (A.getInfoCache().getSccSize(*F) != 1)
2324         indicatePessimisticFixpoint();
2325   }
2326 
2327   /// See AbstractAttribute::updateImpl(...).
2328   ChangeStatus updateImpl(Attributor &A) override {
2329 
2330     // If all live call sites are known to be no-recurse, we are as well.
2331     auto CallSitePred = [&](AbstractCallSite ACS) {
2332       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2333           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
2334           DepClassTy::NONE);
2335       return NoRecurseAA.isKnownNoRecurse();
2336     };
2337     bool AllCallSitesKnown;
2338     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
2339       // If we know all call sites and all are known no-recurse, we are done.
2340       // If all known call sites, which might not be all that exist, are known
2341       // to be no-recurse, we are not done but we can continue to assume
2342       // no-recurse. If one of the call sites we have not visited will become
2343       // live, another update is triggered.
2344       if (AllCallSitesKnown)
2345         indicateOptimisticFixpoint();
2346       return ChangeStatus::UNCHANGED;
2347     }
2348 
2349     // If the above check does not hold anymore we look at the calls.
2350     auto CheckForNoRecurse = [&](Instruction &I) {
2351       const auto &CB = cast<CallBase>(I);
2352       if (CB.hasFnAttr(Attribute::NoRecurse))
2353         return true;
2354 
2355       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
2356           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
2357       if (!NoRecurseAA.isAssumedNoRecurse())
2358         return false;
2359 
2360       // Recursion to the same function
2361       if (CB.getCalledFunction() == getAnchorScope())
2362         return false;
2363 
2364       return true;
2365     };
2366 
2367     bool UsedAssumedInformation = false;
2368     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this,
2369                                            UsedAssumedInformation))
2370       return indicatePessimisticFixpoint();
2371     return ChangeStatus::UNCHANGED;
2372   }
2373 
2374   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
2375 };
2376 
2377 /// NoRecurse attribute deduction for a call sites.
2378 struct AANoRecurseCallSite final : AANoRecurseImpl {
2379   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
2380       : AANoRecurseImpl(IRP, A) {}
2381 
2382   /// See AbstractAttribute::initialize(...).
2383   void initialize(Attributor &A) override {
2384     AANoRecurseImpl::initialize(A);
2385     Function *F = getAssociatedFunction();
2386     if (!F || F->isDeclaration())
2387       indicatePessimisticFixpoint();
2388   }
2389 
2390   /// See AbstractAttribute::updateImpl(...).
2391   ChangeStatus updateImpl(Attributor &A) override {
2392     // TODO: Once we have call site specific value information we can provide
2393     //       call site specific liveness information and then it makes
2394     //       sense to specialize attributes for call sites arguments instead of
2395     //       redirecting requests to the callee argument.
2396     Function *F = getAssociatedFunction();
2397     const IRPosition &FnPos = IRPosition::function(*F);
2398     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
2399     return clampStateAndIndicateChange(getState(), FnAA.getState());
2400   }
2401 
2402   /// See AbstractAttribute::trackStatistics()
2403   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
2404 };
2405 
2406 /// -------------------- Undefined-Behavior Attributes ------------------------
2407 
2408 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
2409   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
2410       : AAUndefinedBehavior(IRP, A) {}
2411 
2412   /// See AbstractAttribute::updateImpl(...).
2413   // through a pointer (i.e. also branches etc.)
2414   ChangeStatus updateImpl(Attributor &A) override {
2415     const size_t UBPrevSize = KnownUBInsts.size();
2416     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
2417 
2418     auto InspectMemAccessInstForUB = [&](Instruction &I) {
2419       // Lang ref now states volatile store is not UB, let's skip them.
2420       if (I.isVolatile() && I.mayWriteToMemory())
2421         return true;
2422 
2423       // Skip instructions that are already saved.
2424       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2425         return true;
2426 
2427       // If we reach here, we know we have an instruction
2428       // that accesses memory through a pointer operand,
2429       // for which getPointerOperand() should give it to us.
2430       Value *PtrOp =
2431           const_cast<Value *>(getPointerOperand(&I, /* AllowVolatile */ true));
2432       assert(PtrOp &&
2433              "Expected pointer operand of memory accessing instruction");
2434 
2435       // Either we stopped and the appropriate action was taken,
2436       // or we got back a simplified value to continue.
2437       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2438       if (!SimplifiedPtrOp.hasValue() || !SimplifiedPtrOp.getValue())
2439         return true;
2440       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2441 
2442       // A memory access through a pointer is considered UB
2443       // only if the pointer has constant null value.
2444       // TODO: Expand it to not only check constant values.
2445       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2446         AssumedNoUBInsts.insert(&I);
2447         return true;
2448       }
2449       const Type *PtrTy = PtrOpVal->getType();
2450 
2451       // Because we only consider instructions inside functions,
2452       // assume that a parent function exists.
2453       const Function *F = I.getFunction();
2454 
2455       // A memory access using constant null pointer is only considered UB
2456       // if null pointer is _not_ defined for the target platform.
2457       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2458         AssumedNoUBInsts.insert(&I);
2459       else
2460         KnownUBInsts.insert(&I);
2461       return true;
2462     };
2463 
2464     auto InspectBrInstForUB = [&](Instruction &I) {
2465       // A conditional branch instruction is considered UB if it has `undef`
2466       // condition.
2467 
2468       // Skip instructions that are already saved.
2469       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2470         return true;
2471 
2472       // We know we have a branch instruction.
2473       auto *BrInst = cast<BranchInst>(&I);
2474 
2475       // Unconditional branches are never considered UB.
2476       if (BrInst->isUnconditional())
2477         return true;
2478 
2479       // Either we stopped and the appropriate action was taken,
2480       // or we got back a simplified value to continue.
2481       Optional<Value *> SimplifiedCond =
2482           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2483       if (!SimplifiedCond.hasValue() || !SimplifiedCond.getValue())
2484         return true;
2485       AssumedNoUBInsts.insert(&I);
2486       return true;
2487     };
2488 
2489     auto InspectCallSiteForUB = [&](Instruction &I) {
2490       // Check whether a callsite always cause UB or not
2491 
2492       // Skip instructions that are already saved.
2493       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2494         return true;
2495 
2496       // Check nonnull and noundef argument attribute violation for each
2497       // callsite.
2498       CallBase &CB = cast<CallBase>(I);
2499       Function *Callee = CB.getCalledFunction();
2500       if (!Callee)
2501         return true;
2502       for (unsigned idx = 0; idx < CB.arg_size(); idx++) {
2503         // If current argument is known to be simplified to null pointer and the
2504         // corresponding argument position is known to have nonnull attribute,
2505         // the argument is poison. Furthermore, if the argument is poison and
2506         // the position is known to have noundef attriubte, this callsite is
2507         // considered UB.
2508         if (idx >= Callee->arg_size())
2509           break;
2510         Value *ArgVal = CB.getArgOperand(idx);
2511         if (!ArgVal)
2512           continue;
2513         // Here, we handle three cases.
2514         //   (1) Not having a value means it is dead. (we can replace the value
2515         //       with undef)
2516         //   (2) Simplified to undef. The argument violate noundef attriubte.
2517         //   (3) Simplified to null pointer where known to be nonnull.
2518         //       The argument is a poison value and violate noundef attribute.
2519         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2520         auto &NoUndefAA =
2521             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2522         if (!NoUndefAA.isKnownNoUndef())
2523           continue;
2524         bool UsedAssumedInformation = false;
2525         Optional<Value *> SimplifiedVal = A.getAssumedSimplified(
2526             IRPosition::value(*ArgVal), *this, UsedAssumedInformation);
2527         if (UsedAssumedInformation)
2528           continue;
2529         if (SimplifiedVal.hasValue() && !SimplifiedVal.getValue())
2530           return true;
2531         if (!SimplifiedVal.hasValue() ||
2532             isa<UndefValue>(*SimplifiedVal.getValue())) {
2533           KnownUBInsts.insert(&I);
2534           continue;
2535         }
2536         if (!ArgVal->getType()->isPointerTy() ||
2537             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2538           continue;
2539         auto &NonNullAA =
2540             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2541         if (NonNullAA.isKnownNonNull())
2542           KnownUBInsts.insert(&I);
2543       }
2544       return true;
2545     };
2546 
2547     auto InspectReturnInstForUB =
2548         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2549           // Check if a return instruction always cause UB or not
2550           // Note: It is guaranteed that the returned position of the anchor
2551           //       scope has noundef attribute when this is called.
2552           //       We also ensure the return position is not "assumed dead"
2553           //       because the returned value was then potentially simplified to
2554           //       `undef` in AAReturnedValues without removing the `noundef`
2555           //       attribute yet.
2556 
2557           // When the returned position has noundef attriubte, UB occur in the
2558           // following cases.
2559           //   (1) Returned value is known to be undef.
2560           //   (2) The value is known to be a null pointer and the returned
2561           //       position has nonnull attribute (because the returned value is
2562           //       poison).
2563           bool FoundUB = false;
2564           if (isa<UndefValue>(V)) {
2565             FoundUB = true;
2566           } else {
2567             if (isa<ConstantPointerNull>(V)) {
2568               auto &NonNullAA = A.getAAFor<AANonNull>(
2569                   *this, IRPosition::returned(*getAnchorScope()),
2570                   DepClassTy::NONE);
2571               if (NonNullAA.isKnownNonNull())
2572                 FoundUB = true;
2573             }
2574           }
2575 
2576           if (FoundUB)
2577             for (ReturnInst *RI : RetInsts)
2578               KnownUBInsts.insert(RI);
2579           return true;
2580         };
2581 
2582     bool UsedAssumedInformation = false;
2583     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2584                               {Instruction::Load, Instruction::Store,
2585                                Instruction::AtomicCmpXchg,
2586                                Instruction::AtomicRMW},
2587                               UsedAssumedInformation,
2588                               /* CheckBBLivenessOnly */ true);
2589     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2590                               UsedAssumedInformation,
2591                               /* CheckBBLivenessOnly */ true);
2592     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this,
2593                                       UsedAssumedInformation);
2594 
2595     // If the returned position of the anchor scope has noundef attriubte, check
2596     // all returned instructions.
2597     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2598       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2599       if (!A.isAssumedDead(ReturnIRP, this, nullptr, UsedAssumedInformation)) {
2600         auto &RetPosNoUndefAA =
2601             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2602         if (RetPosNoUndefAA.isKnownNoUndef())
2603           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2604                                                     *this);
2605       }
2606     }
2607 
2608     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2609         UBPrevSize != KnownUBInsts.size())
2610       return ChangeStatus::CHANGED;
2611     return ChangeStatus::UNCHANGED;
2612   }
2613 
2614   bool isKnownToCauseUB(Instruction *I) const override {
2615     return KnownUBInsts.count(I);
2616   }
2617 
2618   bool isAssumedToCauseUB(Instruction *I) const override {
2619     // In simple words, if an instruction is not in the assumed to _not_
2620     // cause UB, then it is assumed UB (that includes those
2621     // in the KnownUBInsts set). The rest is boilerplate
2622     // is to ensure that it is one of the instructions we test
2623     // for UB.
2624 
2625     switch (I->getOpcode()) {
2626     case Instruction::Load:
2627     case Instruction::Store:
2628     case Instruction::AtomicCmpXchg:
2629     case Instruction::AtomicRMW:
2630       return !AssumedNoUBInsts.count(I);
2631     case Instruction::Br: {
2632       auto BrInst = cast<BranchInst>(I);
2633       if (BrInst->isUnconditional())
2634         return false;
2635       return !AssumedNoUBInsts.count(I);
2636     } break;
2637     default:
2638       return false;
2639     }
2640     return false;
2641   }
2642 
2643   ChangeStatus manifest(Attributor &A) override {
2644     if (KnownUBInsts.empty())
2645       return ChangeStatus::UNCHANGED;
2646     for (Instruction *I : KnownUBInsts)
2647       A.changeToUnreachableAfterManifest(I);
2648     return ChangeStatus::CHANGED;
2649   }
2650 
2651   /// See AbstractAttribute::getAsStr()
2652   const std::string getAsStr() const override {
2653     return getAssumed() ? "undefined-behavior" : "no-ub";
2654   }
2655 
2656   /// Note: The correctness of this analysis depends on the fact that the
2657   /// following 2 sets will stop changing after some point.
2658   /// "Change" here means that their size changes.
2659   /// The size of each set is monotonically increasing
2660   /// (we only add items to them) and it is upper bounded by the number of
2661   /// instructions in the processed function (we can never save more
2662   /// elements in either set than this number). Hence, at some point,
2663   /// they will stop increasing.
2664   /// Consequently, at some point, both sets will have stopped
2665   /// changing, effectively making the analysis reach a fixpoint.
2666 
2667   /// Note: These 2 sets are disjoint and an instruction can be considered
2668   /// one of 3 things:
2669   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2670   ///    the KnownUBInsts set.
2671   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2672   ///    has a reason to assume it).
2673   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2674   ///    could not find a reason to assume or prove that it can cause UB,
2675   ///    hence it assumes it doesn't. We have a set for these instructions
2676   ///    so that we don't reprocess them in every update.
2677   ///    Note however that instructions in this set may cause UB.
2678 
2679 protected:
2680   /// A set of all live instructions _known_ to cause UB.
2681   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2682 
2683 private:
2684   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2685   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2686 
2687   // Should be called on updates in which if we're processing an instruction
2688   // \p I that depends on a value \p V, one of the following has to happen:
2689   // - If the value is assumed, then stop.
2690   // - If the value is known but undef, then consider it UB.
2691   // - Otherwise, do specific processing with the simplified value.
2692   // We return None in the first 2 cases to signify that an appropriate
2693   // action was taken and the caller should stop.
2694   // Otherwise, we return the simplified value that the caller should
2695   // use for specific processing.
2696   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, Value *V,
2697                                          Instruction *I) {
2698     bool UsedAssumedInformation = false;
2699     Optional<Value *> SimplifiedV = A.getAssumedSimplified(
2700         IRPosition::value(*V), *this, UsedAssumedInformation);
2701     if (!UsedAssumedInformation) {
2702       // Don't depend on assumed values.
2703       if (!SimplifiedV.hasValue()) {
2704         // If it is known (which we tested above) but it doesn't have a value,
2705         // then we can assume `undef` and hence the instruction is UB.
2706         KnownUBInsts.insert(I);
2707         return llvm::None;
2708       }
2709       if (!SimplifiedV.getValue())
2710         return nullptr;
2711       V = *SimplifiedV;
2712     }
2713     if (isa<UndefValue>(V)) {
2714       KnownUBInsts.insert(I);
2715       return llvm::None;
2716     }
2717     return V;
2718   }
2719 };
2720 
2721 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2722   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2723       : AAUndefinedBehaviorImpl(IRP, A) {}
2724 
2725   /// See AbstractAttribute::trackStatistics()
2726   void trackStatistics() const override {
2727     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2728                "Number of instructions known to have UB");
2729     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2730         KnownUBInsts.size();
2731   }
2732 };
2733 
2734 /// ------------------------ Will-Return Attributes ----------------------------
2735 
2736 // Helper function that checks whether a function has any cycle which we don't
2737 // know if it is bounded or not.
2738 // Loops with maximum trip count are considered bounded, any other cycle not.
2739 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2740   ScalarEvolution *SE =
2741       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2742   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2743   // If either SCEV or LoopInfo is not available for the function then we assume
2744   // any cycle to be unbounded cycle.
2745   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2746   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2747   if (!SE || !LI) {
2748     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2749       if (SCCI.hasCycle())
2750         return true;
2751     return false;
2752   }
2753 
2754   // If there's irreducible control, the function may contain non-loop cycles.
2755   if (mayContainIrreducibleControl(F, LI))
2756     return true;
2757 
2758   // Any loop that does not have a max trip count is considered unbounded cycle.
2759   for (auto *L : LI->getLoopsInPreorder()) {
2760     if (!SE->getSmallConstantMaxTripCount(L))
2761       return true;
2762   }
2763   return false;
2764 }
2765 
2766 struct AAWillReturnImpl : public AAWillReturn {
2767   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2768       : AAWillReturn(IRP, A) {}
2769 
2770   /// See AbstractAttribute::initialize(...).
2771   void initialize(Attributor &A) override {
2772     AAWillReturn::initialize(A);
2773 
2774     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2775       indicateOptimisticFixpoint();
2776       return;
2777     }
2778   }
2779 
2780   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2781   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2782     // Check for `mustprogress` in the scope and the associated function which
2783     // might be different if this is a call site.
2784     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2785         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2786       return false;
2787 
2788     const auto &MemAA =
2789         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2790     if (!MemAA.isAssumedReadOnly())
2791       return false;
2792     if (KnownOnly && !MemAA.isKnownReadOnly())
2793       return false;
2794     if (!MemAA.isKnownReadOnly())
2795       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2796 
2797     return true;
2798   }
2799 
2800   /// See AbstractAttribute::updateImpl(...).
2801   ChangeStatus updateImpl(Attributor &A) override {
2802     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2803       return ChangeStatus::UNCHANGED;
2804 
2805     auto CheckForWillReturn = [&](Instruction &I) {
2806       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2807       const auto &WillReturnAA =
2808           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2809       if (WillReturnAA.isKnownWillReturn())
2810         return true;
2811       if (!WillReturnAA.isAssumedWillReturn())
2812         return false;
2813       const auto &NoRecurseAA =
2814           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2815       return NoRecurseAA.isAssumedNoRecurse();
2816     };
2817 
2818     bool UsedAssumedInformation = false;
2819     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this,
2820                                            UsedAssumedInformation))
2821       return indicatePessimisticFixpoint();
2822 
2823     return ChangeStatus::UNCHANGED;
2824   }
2825 
2826   /// See AbstractAttribute::getAsStr()
2827   const std::string getAsStr() const override {
2828     return getAssumed() ? "willreturn" : "may-noreturn";
2829   }
2830 };
2831 
2832 struct AAWillReturnFunction final : AAWillReturnImpl {
2833   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2834       : AAWillReturnImpl(IRP, A) {}
2835 
2836   /// See AbstractAttribute::initialize(...).
2837   void initialize(Attributor &A) override {
2838     AAWillReturnImpl::initialize(A);
2839 
2840     Function *F = getAnchorScope();
2841     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2842       indicatePessimisticFixpoint();
2843   }
2844 
2845   /// See AbstractAttribute::trackStatistics()
2846   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2847 };
2848 
2849 /// WillReturn attribute deduction for a call sites.
2850 struct AAWillReturnCallSite final : AAWillReturnImpl {
2851   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2852       : AAWillReturnImpl(IRP, A) {}
2853 
2854   /// See AbstractAttribute::initialize(...).
2855   void initialize(Attributor &A) override {
2856     AAWillReturnImpl::initialize(A);
2857     Function *F = getAssociatedFunction();
2858     if (!F || !A.isFunctionIPOAmendable(*F))
2859       indicatePessimisticFixpoint();
2860   }
2861 
2862   /// See AbstractAttribute::updateImpl(...).
2863   ChangeStatus updateImpl(Attributor &A) override {
2864     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2865       return ChangeStatus::UNCHANGED;
2866 
2867     // TODO: Once we have call site specific value information we can provide
2868     //       call site specific liveness information and then it makes
2869     //       sense to specialize attributes for call sites arguments instead of
2870     //       redirecting requests to the callee argument.
2871     Function *F = getAssociatedFunction();
2872     const IRPosition &FnPos = IRPosition::function(*F);
2873     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2874     return clampStateAndIndicateChange(getState(), FnAA.getState());
2875   }
2876 
2877   /// See AbstractAttribute::trackStatistics()
2878   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2879 };
2880 
2881 /// -------------------AAReachability Attribute--------------------------
2882 
2883 struct AAReachabilityImpl : AAReachability {
2884   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2885       : AAReachability(IRP, A) {}
2886 
2887   const std::string getAsStr() const override {
2888     // TODO: Return the number of reachable queries.
2889     return "reachable";
2890   }
2891 
2892   /// See AbstractAttribute::updateImpl(...).
2893   ChangeStatus updateImpl(Attributor &A) override {
2894     return ChangeStatus::UNCHANGED;
2895   }
2896 };
2897 
2898 struct AAReachabilityFunction final : public AAReachabilityImpl {
2899   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2900       : AAReachabilityImpl(IRP, A) {}
2901 
2902   /// See AbstractAttribute::trackStatistics()
2903   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2904 };
2905 
2906 /// ------------------------ NoAlias Argument Attribute ------------------------
2907 
2908 struct AANoAliasImpl : AANoAlias {
2909   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2910     assert(getAssociatedType()->isPointerTy() &&
2911            "Noalias is a pointer attribute");
2912   }
2913 
2914   const std::string getAsStr() const override {
2915     return getAssumed() ? "noalias" : "may-alias";
2916   }
2917 };
2918 
2919 /// NoAlias attribute for a floating value.
2920 struct AANoAliasFloating final : AANoAliasImpl {
2921   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2922       : AANoAliasImpl(IRP, A) {}
2923 
2924   /// See AbstractAttribute::initialize(...).
2925   void initialize(Attributor &A) override {
2926     AANoAliasImpl::initialize(A);
2927     Value *Val = &getAssociatedValue();
2928     do {
2929       CastInst *CI = dyn_cast<CastInst>(Val);
2930       if (!CI)
2931         break;
2932       Value *Base = CI->getOperand(0);
2933       if (!Base->hasOneUse())
2934         break;
2935       Val = Base;
2936     } while (true);
2937 
2938     if (!Val->getType()->isPointerTy()) {
2939       indicatePessimisticFixpoint();
2940       return;
2941     }
2942 
2943     if (isa<AllocaInst>(Val))
2944       indicateOptimisticFixpoint();
2945     else if (isa<ConstantPointerNull>(Val) &&
2946              !NullPointerIsDefined(getAnchorScope(),
2947                                    Val->getType()->getPointerAddressSpace()))
2948       indicateOptimisticFixpoint();
2949     else if (Val != &getAssociatedValue()) {
2950       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2951           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2952       if (ValNoAliasAA.isKnownNoAlias())
2953         indicateOptimisticFixpoint();
2954     }
2955   }
2956 
2957   /// See AbstractAttribute::updateImpl(...).
2958   ChangeStatus updateImpl(Attributor &A) override {
2959     // TODO: Implement this.
2960     return indicatePessimisticFixpoint();
2961   }
2962 
2963   /// See AbstractAttribute::trackStatistics()
2964   void trackStatistics() const override {
2965     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2966   }
2967 };
2968 
2969 /// NoAlias attribute for an argument.
2970 struct AANoAliasArgument final
2971     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2972   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2973   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2974 
2975   /// See AbstractAttribute::initialize(...).
2976   void initialize(Attributor &A) override {
2977     Base::initialize(A);
2978     // See callsite argument attribute and callee argument attribute.
2979     if (hasAttr({Attribute::ByVal}))
2980       indicateOptimisticFixpoint();
2981   }
2982 
2983   /// See AbstractAttribute::update(...).
2984   ChangeStatus updateImpl(Attributor &A) override {
2985     // We have to make sure no-alias on the argument does not break
2986     // synchronization when this is a callback argument, see also [1] below.
2987     // If synchronization cannot be affected, we delegate to the base updateImpl
2988     // function, otherwise we give up for now.
2989 
2990     // If the function is no-sync, no-alias cannot break synchronization.
2991     const auto &NoSyncAA =
2992         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2993                              DepClassTy::OPTIONAL);
2994     if (NoSyncAA.isAssumedNoSync())
2995       return Base::updateImpl(A);
2996 
2997     // If the argument is read-only, no-alias cannot break synchronization.
2998     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2999         *this, getIRPosition(), DepClassTy::OPTIONAL);
3000     if (MemBehaviorAA.isAssumedReadOnly())
3001       return Base::updateImpl(A);
3002 
3003     // If the argument is never passed through callbacks, no-alias cannot break
3004     // synchronization.
3005     bool AllCallSitesKnown;
3006     if (A.checkForAllCallSites(
3007             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
3008             true, AllCallSitesKnown))
3009       return Base::updateImpl(A);
3010 
3011     // TODO: add no-alias but make sure it doesn't break synchronization by
3012     // introducing fake uses. See:
3013     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
3014     //     International Workshop on OpenMP 2018,
3015     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
3016 
3017     return indicatePessimisticFixpoint();
3018   }
3019 
3020   /// See AbstractAttribute::trackStatistics()
3021   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
3022 };
3023 
3024 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
3025   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
3026       : AANoAliasImpl(IRP, A) {}
3027 
3028   /// See AbstractAttribute::initialize(...).
3029   void initialize(Attributor &A) override {
3030     // See callsite argument attribute and callee argument attribute.
3031     const auto &CB = cast<CallBase>(getAnchorValue());
3032     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
3033       indicateOptimisticFixpoint();
3034     Value &Val = getAssociatedValue();
3035     if (isa<ConstantPointerNull>(Val) &&
3036         !NullPointerIsDefined(getAnchorScope(),
3037                               Val.getType()->getPointerAddressSpace()))
3038       indicateOptimisticFixpoint();
3039   }
3040 
3041   /// Determine if the underlying value may alias with the call site argument
3042   /// \p OtherArgNo of \p ICS (= the underlying call site).
3043   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
3044                             const AAMemoryBehavior &MemBehaviorAA,
3045                             const CallBase &CB, unsigned OtherArgNo) {
3046     // We do not need to worry about aliasing with the underlying IRP.
3047     if (this->getCalleeArgNo() == (int)OtherArgNo)
3048       return false;
3049 
3050     // If it is not a pointer or pointer vector we do not alias.
3051     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
3052     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
3053       return false;
3054 
3055     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
3056         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
3057 
3058     // If the argument is readnone, there is no read-write aliasing.
3059     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
3060       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3061       return false;
3062     }
3063 
3064     // If the argument is readonly and the underlying value is readonly, there
3065     // is no read-write aliasing.
3066     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
3067     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
3068       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3069       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
3070       return false;
3071     }
3072 
3073     // We have to utilize actual alias analysis queries so we need the object.
3074     if (!AAR)
3075       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
3076 
3077     // Try to rule it out at the call site.
3078     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
3079     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
3080                          "callsite arguments: "
3081                       << getAssociatedValue() << " " << *ArgOp << " => "
3082                       << (IsAliasing ? "" : "no-") << "alias \n");
3083 
3084     return IsAliasing;
3085   }
3086 
3087   bool
3088   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
3089                                          const AAMemoryBehavior &MemBehaviorAA,
3090                                          const AANoAlias &NoAliasAA) {
3091     // We can deduce "noalias" if the following conditions hold.
3092     // (i)   Associated value is assumed to be noalias in the definition.
3093     // (ii)  Associated value is assumed to be no-capture in all the uses
3094     //       possibly executed before this callsite.
3095     // (iii) There is no other pointer argument which could alias with the
3096     //       value.
3097 
3098     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
3099     if (!AssociatedValueIsNoAliasAtDef) {
3100       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
3101                         << " is not no-alias at the definition\n");
3102       return false;
3103     }
3104 
3105     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
3106 
3107     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3108     const Function *ScopeFn = VIRP.getAnchorScope();
3109     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
3110     // Check whether the value is captured in the scope using AANoCapture.
3111     //      Look at CFG and check only uses possibly executed before this
3112     //      callsite.
3113     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
3114       Instruction *UserI = cast<Instruction>(U.getUser());
3115 
3116       // If UserI is the curr instruction and there is a single potential use of
3117       // the value in UserI we allow the use.
3118       // TODO: We should inspect the operands and allow those that cannot alias
3119       //       with the value.
3120       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
3121         return true;
3122 
3123       if (ScopeFn) {
3124         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
3125             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
3126 
3127         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
3128           return true;
3129 
3130         if (auto *CB = dyn_cast<CallBase>(UserI)) {
3131           if (CB->isArgOperand(&U)) {
3132 
3133             unsigned ArgNo = CB->getArgOperandNo(&U);
3134 
3135             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
3136                 *this, IRPosition::callsite_argument(*CB, ArgNo),
3137                 DepClassTy::OPTIONAL);
3138 
3139             if (NoCaptureAA.isAssumedNoCapture())
3140               return true;
3141           }
3142         }
3143       }
3144 
3145       // For cases which can potentially have more users
3146       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
3147           isa<SelectInst>(U)) {
3148         Follow = true;
3149         return true;
3150       }
3151 
3152       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
3153       return false;
3154     };
3155 
3156     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
3157       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
3158         LLVM_DEBUG(
3159             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
3160                    << " cannot be noalias as it is potentially captured\n");
3161         return false;
3162       }
3163     }
3164     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
3165 
3166     // Check there is no other pointer argument which could alias with the
3167     // value passed at this call site.
3168     // TODO: AbstractCallSite
3169     const auto &CB = cast<CallBase>(getAnchorValue());
3170     for (unsigned OtherArgNo = 0; OtherArgNo < CB.arg_size(); OtherArgNo++)
3171       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
3172         return false;
3173 
3174     return true;
3175   }
3176 
3177   /// See AbstractAttribute::updateImpl(...).
3178   ChangeStatus updateImpl(Attributor &A) override {
3179     // If the argument is readnone we are done as there are no accesses via the
3180     // argument.
3181     auto &MemBehaviorAA =
3182         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
3183     if (MemBehaviorAA.isAssumedReadNone()) {
3184       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3185       return ChangeStatus::UNCHANGED;
3186     }
3187 
3188     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
3189     const auto &NoAliasAA =
3190         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
3191 
3192     AAResults *AAR = nullptr;
3193     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
3194                                                NoAliasAA)) {
3195       LLVM_DEBUG(
3196           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
3197       return ChangeStatus::UNCHANGED;
3198     }
3199 
3200     return indicatePessimisticFixpoint();
3201   }
3202 
3203   /// See AbstractAttribute::trackStatistics()
3204   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
3205 };
3206 
3207 /// NoAlias attribute for function return value.
3208 struct AANoAliasReturned final : AANoAliasImpl {
3209   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
3210       : AANoAliasImpl(IRP, A) {}
3211 
3212   /// See AbstractAttribute::initialize(...).
3213   void initialize(Attributor &A) override {
3214     AANoAliasImpl::initialize(A);
3215     Function *F = getAssociatedFunction();
3216     if (!F || F->isDeclaration())
3217       indicatePessimisticFixpoint();
3218   }
3219 
3220   /// See AbstractAttribute::updateImpl(...).
3221   virtual ChangeStatus updateImpl(Attributor &A) override {
3222 
3223     auto CheckReturnValue = [&](Value &RV) -> bool {
3224       if (Constant *C = dyn_cast<Constant>(&RV))
3225         if (C->isNullValue() || isa<UndefValue>(C))
3226           return true;
3227 
3228       /// For now, we can only deduce noalias if we have call sites.
3229       /// FIXME: add more support.
3230       if (!isa<CallBase>(&RV))
3231         return false;
3232 
3233       const IRPosition &RVPos = IRPosition::value(RV);
3234       const auto &NoAliasAA =
3235           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
3236       if (!NoAliasAA.isAssumedNoAlias())
3237         return false;
3238 
3239       const auto &NoCaptureAA =
3240           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
3241       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
3242     };
3243 
3244     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
3245       return indicatePessimisticFixpoint();
3246 
3247     return ChangeStatus::UNCHANGED;
3248   }
3249 
3250   /// See AbstractAttribute::trackStatistics()
3251   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
3252 };
3253 
3254 /// NoAlias attribute deduction for a call site return value.
3255 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
3256   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
3257       : AANoAliasImpl(IRP, A) {}
3258 
3259   /// See AbstractAttribute::initialize(...).
3260   void initialize(Attributor &A) override {
3261     AANoAliasImpl::initialize(A);
3262     Function *F = getAssociatedFunction();
3263     if (!F || F->isDeclaration())
3264       indicatePessimisticFixpoint();
3265   }
3266 
3267   /// See AbstractAttribute::updateImpl(...).
3268   ChangeStatus updateImpl(Attributor &A) override {
3269     // TODO: Once we have call site specific value information we can provide
3270     //       call site specific liveness information and then it makes
3271     //       sense to specialize attributes for call sites arguments instead of
3272     //       redirecting requests to the callee argument.
3273     Function *F = getAssociatedFunction();
3274     const IRPosition &FnPos = IRPosition::returned(*F);
3275     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
3276     return clampStateAndIndicateChange(getState(), FnAA.getState());
3277   }
3278 
3279   /// See AbstractAttribute::trackStatistics()
3280   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
3281 };
3282 
3283 /// -------------------AAIsDead Function Attribute-----------------------
3284 
3285 struct AAIsDeadValueImpl : public AAIsDead {
3286   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3287 
3288   /// See AAIsDead::isAssumedDead().
3289   bool isAssumedDead() const override { return isAssumed(IS_DEAD); }
3290 
3291   /// See AAIsDead::isKnownDead().
3292   bool isKnownDead() const override { return isKnown(IS_DEAD); }
3293 
3294   /// See AAIsDead::isAssumedDead(BasicBlock *).
3295   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
3296 
3297   /// See AAIsDead::isKnownDead(BasicBlock *).
3298   bool isKnownDead(const BasicBlock *BB) const override { return false; }
3299 
3300   /// See AAIsDead::isAssumedDead(Instruction *I).
3301   bool isAssumedDead(const Instruction *I) const override {
3302     return I == getCtxI() && isAssumedDead();
3303   }
3304 
3305   /// See AAIsDead::isKnownDead(Instruction *I).
3306   bool isKnownDead(const Instruction *I) const override {
3307     return isAssumedDead(I) && isKnownDead();
3308   }
3309 
3310   /// See AbstractAttribute::getAsStr().
3311   const std::string getAsStr() const override {
3312     return isAssumedDead() ? "assumed-dead" : "assumed-live";
3313   }
3314 
3315   /// Check if all uses are assumed dead.
3316   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
3317     // Callers might not check the type, void has no uses.
3318     if (V.getType()->isVoidTy())
3319       return true;
3320 
3321     // If we replace a value with a constant there are no uses left afterwards.
3322     if (!isa<Constant>(V)) {
3323       bool UsedAssumedInformation = false;
3324       Optional<Constant *> C =
3325           A.getAssumedConstant(V, *this, UsedAssumedInformation);
3326       if (!C.hasValue() || *C)
3327         return true;
3328     }
3329 
3330     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
3331     // Explicitly set the dependence class to required because we want a long
3332     // chain of N dependent instructions to be considered live as soon as one is
3333     // without going through N update cycles. This is not required for
3334     // correctness.
3335     return A.checkForAllUses(UsePred, *this, V, /* CheckBBLivenessOnly */ false,
3336                              DepClassTy::REQUIRED);
3337   }
3338 
3339   /// Determine if \p I is assumed to be side-effect free.
3340   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
3341     if (!I || wouldInstructionBeTriviallyDead(I))
3342       return true;
3343 
3344     auto *CB = dyn_cast<CallBase>(I);
3345     if (!CB || isa<IntrinsicInst>(CB))
3346       return false;
3347 
3348     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
3349     const auto &NoUnwindAA =
3350         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
3351     if (!NoUnwindAA.isAssumedNoUnwind())
3352       return false;
3353     if (!NoUnwindAA.isKnownNoUnwind())
3354       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
3355 
3356     const auto &MemBehaviorAA =
3357         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
3358     if (MemBehaviorAA.isAssumedReadOnly()) {
3359       if (!MemBehaviorAA.isKnownReadOnly())
3360         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
3361       return true;
3362     }
3363     return false;
3364   }
3365 };
3366 
3367 struct AAIsDeadFloating : public AAIsDeadValueImpl {
3368   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
3369       : AAIsDeadValueImpl(IRP, A) {}
3370 
3371   /// See AbstractAttribute::initialize(...).
3372   void initialize(Attributor &A) override {
3373     if (isa<UndefValue>(getAssociatedValue())) {
3374       indicatePessimisticFixpoint();
3375       return;
3376     }
3377 
3378     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3379     if (!isAssumedSideEffectFree(A, I)) {
3380       if (!isa_and_nonnull<StoreInst>(I))
3381         indicatePessimisticFixpoint();
3382       else
3383         removeAssumedBits(HAS_NO_EFFECT);
3384     }
3385   }
3386 
3387   bool isDeadStore(Attributor &A, StoreInst &SI) {
3388     // Lang ref now states volatile store is not UB/dead, let's skip them.
3389     if (SI.isVolatile())
3390       return false;
3391 
3392     bool UsedAssumedInformation = false;
3393     SmallSetVector<Value *, 4> PotentialCopies;
3394     if (!AA::getPotentialCopiesOfStoredValue(A, SI, PotentialCopies, *this,
3395                                              UsedAssumedInformation))
3396       return false;
3397     return llvm::all_of(PotentialCopies, [&](Value *V) {
3398       return A.isAssumedDead(IRPosition::value(*V), this, nullptr,
3399                              UsedAssumedInformation);
3400     });
3401   }
3402 
3403   /// See AbstractAttribute::updateImpl(...).
3404   ChangeStatus updateImpl(Attributor &A) override {
3405     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
3406     if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3407       if (!isDeadStore(A, *SI))
3408         return indicatePessimisticFixpoint();
3409     } else {
3410       if (!isAssumedSideEffectFree(A, I))
3411         return indicatePessimisticFixpoint();
3412       if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3413         return indicatePessimisticFixpoint();
3414     }
3415     return ChangeStatus::UNCHANGED;
3416   }
3417 
3418   /// See AbstractAttribute::manifest(...).
3419   ChangeStatus manifest(Attributor &A) override {
3420     Value &V = getAssociatedValue();
3421     if (auto *I = dyn_cast<Instruction>(&V)) {
3422       // If we get here we basically know the users are all dead. We check if
3423       // isAssumedSideEffectFree returns true here again because it might not be
3424       // the case and only the users are dead but the instruction (=call) is
3425       // still needed.
3426       if (isa<StoreInst>(I) ||
3427           (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I))) {
3428         A.deleteAfterManifest(*I);
3429         return ChangeStatus::CHANGED;
3430       }
3431     }
3432     if (V.use_empty())
3433       return ChangeStatus::UNCHANGED;
3434 
3435     bool UsedAssumedInformation = false;
3436     Optional<Constant *> C =
3437         A.getAssumedConstant(V, *this, UsedAssumedInformation);
3438     if (C.hasValue() && C.getValue())
3439       return ChangeStatus::UNCHANGED;
3440 
3441     // Replace the value with undef as it is dead but keep droppable uses around
3442     // as they provide information we don't want to give up on just yet.
3443     UndefValue &UV = *UndefValue::get(V.getType());
3444     bool AnyChange =
3445         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
3446     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3447   }
3448 
3449   /// See AbstractAttribute::trackStatistics()
3450   void trackStatistics() const override {
3451     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
3452   }
3453 };
3454 
3455 struct AAIsDeadArgument : public AAIsDeadFloating {
3456   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
3457       : AAIsDeadFloating(IRP, A) {}
3458 
3459   /// See AbstractAttribute::initialize(...).
3460   void initialize(Attributor &A) override {
3461     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
3462       indicatePessimisticFixpoint();
3463   }
3464 
3465   /// See AbstractAttribute::manifest(...).
3466   ChangeStatus manifest(Attributor &A) override {
3467     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
3468     Argument &Arg = *getAssociatedArgument();
3469     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
3470       if (A.registerFunctionSignatureRewrite(
3471               Arg, /* ReplacementTypes */ {},
3472               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
3473               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3474         Arg.dropDroppableUses();
3475         return ChangeStatus::CHANGED;
3476       }
3477     return Changed;
3478   }
3479 
3480   /// See AbstractAttribute::trackStatistics()
3481   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3482 };
3483 
3484 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3485   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3486       : AAIsDeadValueImpl(IRP, A) {}
3487 
3488   /// See AbstractAttribute::initialize(...).
3489   void initialize(Attributor &A) override {
3490     if (isa<UndefValue>(getAssociatedValue()))
3491       indicatePessimisticFixpoint();
3492   }
3493 
3494   /// See AbstractAttribute::updateImpl(...).
3495   ChangeStatus updateImpl(Attributor &A) override {
3496     // TODO: Once we have call site specific value information we can provide
3497     //       call site specific liveness information and then it makes
3498     //       sense to specialize attributes for call sites arguments instead of
3499     //       redirecting requests to the callee argument.
3500     Argument *Arg = getAssociatedArgument();
3501     if (!Arg)
3502       return indicatePessimisticFixpoint();
3503     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3504     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3505     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3506   }
3507 
3508   /// See AbstractAttribute::manifest(...).
3509   ChangeStatus manifest(Attributor &A) override {
3510     CallBase &CB = cast<CallBase>(getAnchorValue());
3511     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3512     assert(!isa<UndefValue>(U.get()) &&
3513            "Expected undef values to be filtered out!");
3514     UndefValue &UV = *UndefValue::get(U->getType());
3515     if (A.changeUseAfterManifest(U, UV))
3516       return ChangeStatus::CHANGED;
3517     return ChangeStatus::UNCHANGED;
3518   }
3519 
3520   /// See AbstractAttribute::trackStatistics()
3521   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3522 };
3523 
3524 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3525   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3526       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3527 
3528   /// See AAIsDead::isAssumedDead().
3529   bool isAssumedDead() const override {
3530     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3531   }
3532 
3533   /// See AbstractAttribute::initialize(...).
3534   void initialize(Attributor &A) override {
3535     if (isa<UndefValue>(getAssociatedValue())) {
3536       indicatePessimisticFixpoint();
3537       return;
3538     }
3539 
3540     // We track this separately as a secondary state.
3541     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3542   }
3543 
3544   /// See AbstractAttribute::updateImpl(...).
3545   ChangeStatus updateImpl(Attributor &A) override {
3546     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3547     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3548       IsAssumedSideEffectFree = false;
3549       Changed = ChangeStatus::CHANGED;
3550     }
3551     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3552       return indicatePessimisticFixpoint();
3553     return Changed;
3554   }
3555 
3556   /// See AbstractAttribute::trackStatistics()
3557   void trackStatistics() const override {
3558     if (IsAssumedSideEffectFree)
3559       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3560     else
3561       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3562   }
3563 
3564   /// See AbstractAttribute::getAsStr().
3565   const std::string getAsStr() const override {
3566     return isAssumedDead()
3567                ? "assumed-dead"
3568                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3569   }
3570 
3571 private:
3572   bool IsAssumedSideEffectFree;
3573 };
3574 
3575 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3576   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3577       : AAIsDeadValueImpl(IRP, A) {}
3578 
3579   /// See AbstractAttribute::updateImpl(...).
3580   ChangeStatus updateImpl(Attributor &A) override {
3581 
3582     bool UsedAssumedInformation = false;
3583     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3584                               {Instruction::Ret}, UsedAssumedInformation);
3585 
3586     auto PredForCallSite = [&](AbstractCallSite ACS) {
3587       if (ACS.isCallbackCall() || !ACS.getInstruction())
3588         return false;
3589       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3590     };
3591 
3592     bool AllCallSitesKnown;
3593     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3594                                 AllCallSitesKnown))
3595       return indicatePessimisticFixpoint();
3596 
3597     return ChangeStatus::UNCHANGED;
3598   }
3599 
3600   /// See AbstractAttribute::manifest(...).
3601   ChangeStatus manifest(Attributor &A) override {
3602     // TODO: Rewrite the signature to return void?
3603     bool AnyChange = false;
3604     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3605     auto RetInstPred = [&](Instruction &I) {
3606       ReturnInst &RI = cast<ReturnInst>(I);
3607       if (!isa<UndefValue>(RI.getReturnValue()))
3608         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3609       return true;
3610     };
3611     bool UsedAssumedInformation = false;
3612     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret},
3613                               UsedAssumedInformation);
3614     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3615   }
3616 
3617   /// See AbstractAttribute::trackStatistics()
3618   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3619 };
3620 
3621 struct AAIsDeadFunction : public AAIsDead {
3622   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3623 
3624   /// See AbstractAttribute::initialize(...).
3625   void initialize(Attributor &A) override {
3626     const Function *F = getAnchorScope();
3627     if (F && !F->isDeclaration()) {
3628       // We only want to compute liveness once. If the function is not part of
3629       // the SCC, skip it.
3630       if (A.isRunOn(*const_cast<Function *>(F))) {
3631         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3632         assumeLive(A, F->getEntryBlock());
3633       } else {
3634         indicatePessimisticFixpoint();
3635       }
3636     }
3637   }
3638 
3639   /// See AbstractAttribute::getAsStr().
3640   const std::string getAsStr() const override {
3641     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3642            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3643            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3644            std::to_string(KnownDeadEnds.size()) + "]";
3645   }
3646 
3647   /// See AbstractAttribute::manifest(...).
3648   ChangeStatus manifest(Attributor &A) override {
3649     assert(getState().isValidState() &&
3650            "Attempted to manifest an invalid state!");
3651 
3652     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3653     Function &F = *getAnchorScope();
3654 
3655     if (AssumedLiveBlocks.empty()) {
3656       A.deleteAfterManifest(F);
3657       return ChangeStatus::CHANGED;
3658     }
3659 
3660     // Flag to determine if we can change an invoke to a call assuming the
3661     // callee is nounwind. This is not possible if the personality of the
3662     // function allows to catch asynchronous exceptions.
3663     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3664 
3665     KnownDeadEnds.set_union(ToBeExploredFrom);
3666     for (const Instruction *DeadEndI : KnownDeadEnds) {
3667       auto *CB = dyn_cast<CallBase>(DeadEndI);
3668       if (!CB)
3669         continue;
3670       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3671           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3672       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3673       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3674         continue;
3675 
3676       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3677         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3678       else
3679         A.changeToUnreachableAfterManifest(
3680             const_cast<Instruction *>(DeadEndI->getNextNode()));
3681       HasChanged = ChangeStatus::CHANGED;
3682     }
3683 
3684     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3685     for (BasicBlock &BB : F)
3686       if (!AssumedLiveBlocks.count(&BB)) {
3687         A.deleteAfterManifest(BB);
3688         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3689       }
3690 
3691     return HasChanged;
3692   }
3693 
3694   /// See AbstractAttribute::updateImpl(...).
3695   ChangeStatus updateImpl(Attributor &A) override;
3696 
3697   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3698     return !AssumedLiveEdges.count(std::make_pair(From, To));
3699   }
3700 
3701   /// See AbstractAttribute::trackStatistics()
3702   void trackStatistics() const override {}
3703 
3704   /// Returns true if the function is assumed dead.
3705   bool isAssumedDead() const override { return false; }
3706 
3707   /// See AAIsDead::isKnownDead().
3708   bool isKnownDead() const override { return false; }
3709 
3710   /// See AAIsDead::isAssumedDead(BasicBlock *).
3711   bool isAssumedDead(const BasicBlock *BB) const override {
3712     assert(BB->getParent() == getAnchorScope() &&
3713            "BB must be in the same anchor scope function.");
3714 
3715     if (!getAssumed())
3716       return false;
3717     return !AssumedLiveBlocks.count(BB);
3718   }
3719 
3720   /// See AAIsDead::isKnownDead(BasicBlock *).
3721   bool isKnownDead(const BasicBlock *BB) const override {
3722     return getKnown() && isAssumedDead(BB);
3723   }
3724 
3725   /// See AAIsDead::isAssumed(Instruction *I).
3726   bool isAssumedDead(const Instruction *I) const override {
3727     assert(I->getParent()->getParent() == getAnchorScope() &&
3728            "Instruction must be in the same anchor scope function.");
3729 
3730     if (!getAssumed())
3731       return false;
3732 
3733     // If it is not in AssumedLiveBlocks then it for sure dead.
3734     // Otherwise, it can still be after noreturn call in a live block.
3735     if (!AssumedLiveBlocks.count(I->getParent()))
3736       return true;
3737 
3738     // If it is not after a liveness barrier it is live.
3739     const Instruction *PrevI = I->getPrevNode();
3740     while (PrevI) {
3741       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3742         return true;
3743       PrevI = PrevI->getPrevNode();
3744     }
3745     return false;
3746   }
3747 
3748   /// See AAIsDead::isKnownDead(Instruction *I).
3749   bool isKnownDead(const Instruction *I) const override {
3750     return getKnown() && isAssumedDead(I);
3751   }
3752 
3753   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3754   /// that internal function called from \p BB should now be looked at.
3755   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3756     if (!AssumedLiveBlocks.insert(&BB).second)
3757       return false;
3758 
3759     // We assume that all of BB is (probably) live now and if there are calls to
3760     // internal functions we will assume that those are now live as well. This
3761     // is a performance optimization for blocks with calls to a lot of internal
3762     // functions. It can however cause dead functions to be treated as live.
3763     for (const Instruction &I : BB)
3764       if (const auto *CB = dyn_cast<CallBase>(&I))
3765         if (const Function *F = CB->getCalledFunction())
3766           if (F->hasLocalLinkage())
3767             A.markLiveInternalFunction(*F);
3768     return true;
3769   }
3770 
3771   /// Collection of instructions that need to be explored again, e.g., we
3772   /// did assume they do not transfer control to (one of their) successors.
3773   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3774 
3775   /// Collection of instructions that are known to not transfer control.
3776   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3777 
3778   /// Collection of all assumed live edges
3779   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3780 
3781   /// Collection of all assumed live BasicBlocks.
3782   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3783 };
3784 
3785 static bool
3786 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3787                         AbstractAttribute &AA,
3788                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3789   const IRPosition &IPos = IRPosition::callsite_function(CB);
3790 
3791   const auto &NoReturnAA =
3792       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3793   if (NoReturnAA.isAssumedNoReturn())
3794     return !NoReturnAA.isKnownNoReturn();
3795   if (CB.isTerminator())
3796     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3797   else
3798     AliveSuccessors.push_back(CB.getNextNode());
3799   return false;
3800 }
3801 
3802 static bool
3803 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3804                         AbstractAttribute &AA,
3805                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3806   bool UsedAssumedInformation =
3807       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3808 
3809   // First, determine if we can change an invoke to a call assuming the
3810   // callee is nounwind. This is not possible if the personality of the
3811   // function allows to catch asynchronous exceptions.
3812   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3813     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3814   } else {
3815     const IRPosition &IPos = IRPosition::callsite_function(II);
3816     const auto &AANoUnw =
3817         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3818     if (AANoUnw.isAssumedNoUnwind()) {
3819       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3820     } else {
3821       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3822     }
3823   }
3824   return UsedAssumedInformation;
3825 }
3826 
3827 static bool
3828 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3829                         AbstractAttribute &AA,
3830                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3831   bool UsedAssumedInformation = false;
3832   if (BI.getNumSuccessors() == 1) {
3833     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3834   } else {
3835     Optional<Constant *> C =
3836         A.getAssumedConstant(*BI.getCondition(), AA, UsedAssumedInformation);
3837     if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3838       // No value yet, assume both edges are dead.
3839     } else if (isa_and_nonnull<ConstantInt>(*C)) {
3840       const BasicBlock *SuccBB =
3841           BI.getSuccessor(1 - cast<ConstantInt>(*C)->getValue().getZExtValue());
3842       AliveSuccessors.push_back(&SuccBB->front());
3843     } else {
3844       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3845       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3846       UsedAssumedInformation = false;
3847     }
3848   }
3849   return UsedAssumedInformation;
3850 }
3851 
3852 static bool
3853 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3854                         AbstractAttribute &AA,
3855                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3856   bool UsedAssumedInformation = false;
3857   Optional<Constant *> C =
3858       A.getAssumedConstant(*SI.getCondition(), AA, UsedAssumedInformation);
3859   if (!C.hasValue() || isa_and_nonnull<UndefValue>(C.getValue())) {
3860     // No value yet, assume all edges are dead.
3861   } else if (isa_and_nonnull<ConstantInt>(C.getValue())) {
3862     for (auto &CaseIt : SI.cases()) {
3863       if (CaseIt.getCaseValue() == C.getValue()) {
3864         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3865         return UsedAssumedInformation;
3866       }
3867     }
3868     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3869     return UsedAssumedInformation;
3870   } else {
3871     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3872       AliveSuccessors.push_back(&SuccBB->front());
3873   }
3874   return UsedAssumedInformation;
3875 }
3876 
3877 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3878   ChangeStatus Change = ChangeStatus::UNCHANGED;
3879 
3880   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3881                     << getAnchorScope()->size() << "] BBs and "
3882                     << ToBeExploredFrom.size() << " exploration points and "
3883                     << KnownDeadEnds.size() << " known dead ends\n");
3884 
3885   // Copy and clear the list of instructions we need to explore from. It is
3886   // refilled with instructions the next update has to look at.
3887   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3888                                                ToBeExploredFrom.end());
3889   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3890 
3891   SmallVector<const Instruction *, 8> AliveSuccessors;
3892   while (!Worklist.empty()) {
3893     const Instruction *I = Worklist.pop_back_val();
3894     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3895 
3896     // Fast forward for uninteresting instructions. We could look for UB here
3897     // though.
3898     while (!I->isTerminator() && !isa<CallBase>(I))
3899       I = I->getNextNode();
3900 
3901     AliveSuccessors.clear();
3902 
3903     bool UsedAssumedInformation = false;
3904     switch (I->getOpcode()) {
3905     // TODO: look for (assumed) UB to backwards propagate "deadness".
3906     default:
3907       assert(I->isTerminator() &&
3908              "Expected non-terminators to be handled already!");
3909       for (const BasicBlock *SuccBB : successors(I->getParent()))
3910         AliveSuccessors.push_back(&SuccBB->front());
3911       break;
3912     case Instruction::Call:
3913       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3914                                                        *this, AliveSuccessors);
3915       break;
3916     case Instruction::Invoke:
3917       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3918                                                        *this, AliveSuccessors);
3919       break;
3920     case Instruction::Br:
3921       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3922                                                        *this, AliveSuccessors);
3923       break;
3924     case Instruction::Switch:
3925       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3926                                                        *this, AliveSuccessors);
3927       break;
3928     }
3929 
3930     if (UsedAssumedInformation) {
3931       NewToBeExploredFrom.insert(I);
3932     } else if (AliveSuccessors.empty() ||
3933                (I->isTerminator() &&
3934                 AliveSuccessors.size() < I->getNumSuccessors())) {
3935       if (KnownDeadEnds.insert(I))
3936         Change = ChangeStatus::CHANGED;
3937     }
3938 
3939     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3940                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3941                       << UsedAssumedInformation << "\n");
3942 
3943     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3944       if (!I->isTerminator()) {
3945         assert(AliveSuccessors.size() == 1 &&
3946                "Non-terminator expected to have a single successor!");
3947         Worklist.push_back(AliveSuccessor);
3948       } else {
3949         // record the assumed live edge
3950         auto Edge = std::make_pair(I->getParent(), AliveSuccessor->getParent());
3951         if (AssumedLiveEdges.insert(Edge).second)
3952           Change = ChangeStatus::CHANGED;
3953         if (assumeLive(A, *AliveSuccessor->getParent()))
3954           Worklist.push_back(AliveSuccessor);
3955       }
3956     }
3957   }
3958 
3959   // Check if the content of ToBeExploredFrom changed, ignore the order.
3960   if (NewToBeExploredFrom.size() != ToBeExploredFrom.size() ||
3961       llvm::any_of(NewToBeExploredFrom, [&](const Instruction *I) {
3962         return !ToBeExploredFrom.count(I);
3963       })) {
3964     Change = ChangeStatus::CHANGED;
3965     ToBeExploredFrom = std::move(NewToBeExploredFrom);
3966   }
3967 
3968   // If we know everything is live there is no need to query for liveness.
3969   // Instead, indicating a pessimistic fixpoint will cause the state to be
3970   // "invalid" and all queries to be answered conservatively without lookups.
3971   // To be in this state we have to (1) finished the exploration and (3) not
3972   // discovered any non-trivial dead end and (2) not ruled unreachable code
3973   // dead.
3974   if (ToBeExploredFrom.empty() &&
3975       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3976       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3977         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3978       }))
3979     return indicatePessimisticFixpoint();
3980   return Change;
3981 }
3982 
3983 /// Liveness information for a call sites.
3984 struct AAIsDeadCallSite final : AAIsDeadFunction {
3985   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3986       : AAIsDeadFunction(IRP, A) {}
3987 
3988   /// See AbstractAttribute::initialize(...).
3989   void initialize(Attributor &A) override {
3990     // TODO: Once we have call site specific value information we can provide
3991     //       call site specific liveness information and then it makes
3992     //       sense to specialize attributes for call sites instead of
3993     //       redirecting requests to the callee.
3994     llvm_unreachable("Abstract attributes for liveness are not "
3995                      "supported for call sites yet!");
3996   }
3997 
3998   /// See AbstractAttribute::updateImpl(...).
3999   ChangeStatus updateImpl(Attributor &A) override {
4000     return indicatePessimisticFixpoint();
4001   }
4002 
4003   /// See AbstractAttribute::trackStatistics()
4004   void trackStatistics() const override {}
4005 };
4006 
4007 /// -------------------- Dereferenceable Argument Attribute --------------------
4008 
4009 struct AADereferenceableImpl : AADereferenceable {
4010   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
4011       : AADereferenceable(IRP, A) {}
4012   using StateType = DerefState;
4013 
4014   /// See AbstractAttribute::initialize(...).
4015   void initialize(Attributor &A) override {
4016     SmallVector<Attribute, 4> Attrs;
4017     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
4018              Attrs, /* IgnoreSubsumingPositions */ false, &A);
4019     for (const Attribute &Attr : Attrs)
4020       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
4021 
4022     const IRPosition &IRP = this->getIRPosition();
4023     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
4024 
4025     bool CanBeNull, CanBeFreed;
4026     takeKnownDerefBytesMaximum(
4027         IRP.getAssociatedValue().getPointerDereferenceableBytes(
4028             A.getDataLayout(), CanBeNull, CanBeFreed));
4029 
4030     bool IsFnInterface = IRP.isFnInterfaceKind();
4031     Function *FnScope = IRP.getAnchorScope();
4032     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
4033       indicatePessimisticFixpoint();
4034       return;
4035     }
4036 
4037     if (Instruction *CtxI = getCtxI())
4038       followUsesInMBEC(*this, A, getState(), *CtxI);
4039   }
4040 
4041   /// See AbstractAttribute::getState()
4042   /// {
4043   StateType &getState() override { return *this; }
4044   const StateType &getState() const override { return *this; }
4045   /// }
4046 
4047   /// Helper function for collecting accessed bytes in must-be-executed-context
4048   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
4049                               DerefState &State) {
4050     const Value *UseV = U->get();
4051     if (!UseV->getType()->isPointerTy())
4052       return;
4053 
4054     Type *PtrTy = UseV->getType();
4055     const DataLayout &DL = A.getDataLayout();
4056     int64_t Offset;
4057     if (const Value *Base = getBasePointerOfAccessPointerOperand(
4058             I, Offset, DL, /*AllowNonInbounds*/ true)) {
4059       if (Base == &getAssociatedValue() &&
4060           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
4061         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
4062         State.addAccessedBytes(Offset, Size);
4063       }
4064     }
4065   }
4066 
4067   /// See followUsesInMBEC
4068   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4069                        AADereferenceable::StateType &State) {
4070     bool IsNonNull = false;
4071     bool TrackUse = false;
4072     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
4073         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
4074     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
4075                       << " for instruction " << *I << "\n");
4076 
4077     addAccessedBytesForUse(A, U, I, State);
4078     State.takeKnownDerefBytesMaximum(DerefBytes);
4079     return TrackUse;
4080   }
4081 
4082   /// See AbstractAttribute::manifest(...).
4083   ChangeStatus manifest(Attributor &A) override {
4084     ChangeStatus Change = AADereferenceable::manifest(A);
4085     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
4086       removeAttrs({Attribute::DereferenceableOrNull});
4087       return ChangeStatus::CHANGED;
4088     }
4089     return Change;
4090   }
4091 
4092   void getDeducedAttributes(LLVMContext &Ctx,
4093                             SmallVectorImpl<Attribute> &Attrs) const override {
4094     // TODO: Add *_globally support
4095     if (isAssumedNonNull())
4096       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
4097           Ctx, getAssumedDereferenceableBytes()));
4098     else
4099       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
4100           Ctx, getAssumedDereferenceableBytes()));
4101   }
4102 
4103   /// See AbstractAttribute::getAsStr().
4104   const std::string getAsStr() const override {
4105     if (!getAssumedDereferenceableBytes())
4106       return "unknown-dereferenceable";
4107     return std::string("dereferenceable") +
4108            (isAssumedNonNull() ? "" : "_or_null") +
4109            (isAssumedGlobal() ? "_globally" : "") + "<" +
4110            std::to_string(getKnownDereferenceableBytes()) + "-" +
4111            std::to_string(getAssumedDereferenceableBytes()) + ">";
4112   }
4113 };
4114 
4115 /// Dereferenceable attribute for a floating value.
4116 struct AADereferenceableFloating : AADereferenceableImpl {
4117   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
4118       : AADereferenceableImpl(IRP, A) {}
4119 
4120   /// See AbstractAttribute::updateImpl(...).
4121   ChangeStatus updateImpl(Attributor &A) override {
4122     const DataLayout &DL = A.getDataLayout();
4123 
4124     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
4125                             bool Stripped) -> bool {
4126       unsigned IdxWidth =
4127           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
4128       APInt Offset(IdxWidth, 0);
4129       const Value *Base =
4130           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
4131 
4132       const auto &AA = A.getAAFor<AADereferenceable>(
4133           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
4134       int64_t DerefBytes = 0;
4135       if (!Stripped && this == &AA) {
4136         // Use IR information if we did not strip anything.
4137         // TODO: track globally.
4138         bool CanBeNull, CanBeFreed;
4139         DerefBytes =
4140             Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
4141         T.GlobalState.indicatePessimisticFixpoint();
4142       } else {
4143         const DerefState &DS = AA.getState();
4144         DerefBytes = DS.DerefBytesState.getAssumed();
4145         T.GlobalState &= DS.GlobalState;
4146       }
4147 
4148       // For now we do not try to "increase" dereferenceability due to negative
4149       // indices as we first have to come up with code to deal with loops and
4150       // for overflows of the dereferenceable bytes.
4151       int64_t OffsetSExt = Offset.getSExtValue();
4152       if (OffsetSExt < 0)
4153         OffsetSExt = 0;
4154 
4155       T.takeAssumedDerefBytesMinimum(
4156           std::max(int64_t(0), DerefBytes - OffsetSExt));
4157 
4158       if (this == &AA) {
4159         if (!Stripped) {
4160           // If nothing was stripped IR information is all we got.
4161           T.takeKnownDerefBytesMaximum(
4162               std::max(int64_t(0), DerefBytes - OffsetSExt));
4163           T.indicatePessimisticFixpoint();
4164         } else if (OffsetSExt > 0) {
4165           // If something was stripped but there is circular reasoning we look
4166           // for the offset. If it is positive we basically decrease the
4167           // dereferenceable bytes in a circluar loop now, which will simply
4168           // drive them down to the known value in a very slow way which we
4169           // can accelerate.
4170           T.indicatePessimisticFixpoint();
4171         }
4172       }
4173 
4174       return T.isValidState();
4175     };
4176 
4177     DerefState T;
4178     if (!genericValueTraversal<DerefState>(A, getIRPosition(), *this, T,
4179                                            VisitValueCB, getCtxI()))
4180       return indicatePessimisticFixpoint();
4181 
4182     return clampStateAndIndicateChange(getState(), T);
4183   }
4184 
4185   /// See AbstractAttribute::trackStatistics()
4186   void trackStatistics() const override {
4187     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
4188   }
4189 };
4190 
4191 /// Dereferenceable attribute for a return value.
4192 struct AADereferenceableReturned final
4193     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
4194   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
4195       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
4196             IRP, A) {}
4197 
4198   /// See AbstractAttribute::trackStatistics()
4199   void trackStatistics() const override {
4200     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
4201   }
4202 };
4203 
4204 /// Dereferenceable attribute for an argument
4205 struct AADereferenceableArgument final
4206     : AAArgumentFromCallSiteArguments<AADereferenceable,
4207                                       AADereferenceableImpl> {
4208   using Base =
4209       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
4210   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
4211       : Base(IRP, A) {}
4212 
4213   /// See AbstractAttribute::trackStatistics()
4214   void trackStatistics() const override {
4215     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
4216   }
4217 };
4218 
4219 /// Dereferenceable attribute for a call site argument.
4220 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
4221   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
4222       : AADereferenceableFloating(IRP, A) {}
4223 
4224   /// See AbstractAttribute::trackStatistics()
4225   void trackStatistics() const override {
4226     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
4227   }
4228 };
4229 
4230 /// Dereferenceable attribute deduction for a call site return value.
4231 struct AADereferenceableCallSiteReturned final
4232     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
4233   using Base =
4234       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
4235   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
4236       : Base(IRP, A) {}
4237 
4238   /// See AbstractAttribute::trackStatistics()
4239   void trackStatistics() const override {
4240     STATS_DECLTRACK_CS_ATTR(dereferenceable);
4241   }
4242 };
4243 
4244 // ------------------------ Align Argument Attribute ------------------------
4245 
4246 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
4247                                     Value &AssociatedValue, const Use *U,
4248                                     const Instruction *I, bool &TrackUse) {
4249   // We need to follow common pointer manipulation uses to the accesses they
4250   // feed into.
4251   if (isa<CastInst>(I)) {
4252     // Follow all but ptr2int casts.
4253     TrackUse = !isa<PtrToIntInst>(I);
4254     return 0;
4255   }
4256   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4257     if (GEP->hasAllConstantIndices())
4258       TrackUse = true;
4259     return 0;
4260   }
4261 
4262   MaybeAlign MA;
4263   if (const auto *CB = dyn_cast<CallBase>(I)) {
4264     if (CB->isBundleOperand(U) || CB->isCallee(U))
4265       return 0;
4266 
4267     unsigned ArgNo = CB->getArgOperandNo(U);
4268     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
4269     // As long as we only use known information there is no need to track
4270     // dependences here.
4271     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
4272     MA = MaybeAlign(AlignAA.getKnownAlign());
4273   }
4274 
4275   const DataLayout &DL = A.getDataLayout();
4276   const Value *UseV = U->get();
4277   if (auto *SI = dyn_cast<StoreInst>(I)) {
4278     if (SI->getPointerOperand() == UseV)
4279       MA = SI->getAlign();
4280   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
4281     if (LI->getPointerOperand() == UseV)
4282       MA = LI->getAlign();
4283   }
4284 
4285   if (!MA || *MA <= QueryingAA.getKnownAlign())
4286     return 0;
4287 
4288   unsigned Alignment = MA->value();
4289   int64_t Offset;
4290 
4291   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
4292     if (Base == &AssociatedValue) {
4293       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4294       // So we can say that the maximum power of two which is a divisor of
4295       // gcd(Offset, Alignment) is an alignment.
4296 
4297       uint32_t gcd =
4298           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
4299       Alignment = llvm::PowerOf2Floor(gcd);
4300     }
4301   }
4302 
4303   return Alignment;
4304 }
4305 
4306 struct AAAlignImpl : AAAlign {
4307   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
4308 
4309   /// See AbstractAttribute::initialize(...).
4310   void initialize(Attributor &A) override {
4311     SmallVector<Attribute, 4> Attrs;
4312     getAttrs({Attribute::Alignment}, Attrs);
4313     for (const Attribute &Attr : Attrs)
4314       takeKnownMaximum(Attr.getValueAsInt());
4315 
4316     Value &V = getAssociatedValue();
4317     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
4318     //       use of the function pointer. This was caused by D73131. We want to
4319     //       avoid this for function pointers especially because we iterate
4320     //       their uses and int2ptr is not handled. It is not a correctness
4321     //       problem though!
4322     if (!V.getType()->getPointerElementType()->isFunctionTy())
4323       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
4324 
4325     if (getIRPosition().isFnInterfaceKind() &&
4326         (!getAnchorScope() ||
4327          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
4328       indicatePessimisticFixpoint();
4329       return;
4330     }
4331 
4332     if (Instruction *CtxI = getCtxI())
4333       followUsesInMBEC(*this, A, getState(), *CtxI);
4334   }
4335 
4336   /// See AbstractAttribute::manifest(...).
4337   ChangeStatus manifest(Attributor &A) override {
4338     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
4339 
4340     // Check for users that allow alignment annotations.
4341     Value &AssociatedValue = getAssociatedValue();
4342     for (const Use &U : AssociatedValue.uses()) {
4343       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
4344         if (SI->getPointerOperand() == &AssociatedValue)
4345           if (SI->getAlignment() < getAssumedAlign()) {
4346             STATS_DECLTRACK(AAAlign, Store,
4347                             "Number of times alignment added to a store");
4348             SI->setAlignment(Align(getAssumedAlign()));
4349             LoadStoreChanged = ChangeStatus::CHANGED;
4350           }
4351       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
4352         if (LI->getPointerOperand() == &AssociatedValue)
4353           if (LI->getAlignment() < getAssumedAlign()) {
4354             LI->setAlignment(Align(getAssumedAlign()));
4355             STATS_DECLTRACK(AAAlign, Load,
4356                             "Number of times alignment added to a load");
4357             LoadStoreChanged = ChangeStatus::CHANGED;
4358           }
4359       }
4360     }
4361 
4362     ChangeStatus Changed = AAAlign::manifest(A);
4363 
4364     Align InheritAlign =
4365         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4366     if (InheritAlign >= getAssumedAlign())
4367       return LoadStoreChanged;
4368     return Changed | LoadStoreChanged;
4369   }
4370 
4371   // TODO: Provide a helper to determine the implied ABI alignment and check in
4372   //       the existing manifest method and a new one for AAAlignImpl that value
4373   //       to avoid making the alignment explicit if it did not improve.
4374 
4375   /// See AbstractAttribute::getDeducedAttributes
4376   virtual void
4377   getDeducedAttributes(LLVMContext &Ctx,
4378                        SmallVectorImpl<Attribute> &Attrs) const override {
4379     if (getAssumedAlign() > 1)
4380       Attrs.emplace_back(
4381           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
4382   }
4383 
4384   /// See followUsesInMBEC
4385   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
4386                        AAAlign::StateType &State) {
4387     bool TrackUse = false;
4388 
4389     unsigned int KnownAlign =
4390         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
4391     State.takeKnownMaximum(KnownAlign);
4392 
4393     return TrackUse;
4394   }
4395 
4396   /// See AbstractAttribute::getAsStr().
4397   const std::string getAsStr() const override {
4398     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
4399                                 "-" + std::to_string(getAssumedAlign()) + ">")
4400                              : "unknown-align";
4401   }
4402 };
4403 
4404 /// Align attribute for a floating value.
4405 struct AAAlignFloating : AAAlignImpl {
4406   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
4407 
4408   /// See AbstractAttribute::updateImpl(...).
4409   ChangeStatus updateImpl(Attributor &A) override {
4410     const DataLayout &DL = A.getDataLayout();
4411 
4412     auto VisitValueCB = [&](Value &V, const Instruction *,
4413                             AAAlign::StateType &T, bool Stripped) -> bool {
4414       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
4415                                            DepClassTy::REQUIRED);
4416       if (!Stripped && this == &AA) {
4417         int64_t Offset;
4418         unsigned Alignment = 1;
4419         if (const Value *Base =
4420                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
4421           Align PA = Base->getPointerAlignment(DL);
4422           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
4423           // So we can say that the maximum power of two which is a divisor of
4424           // gcd(Offset, Alignment) is an alignment.
4425 
4426           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
4427                                                uint32_t(PA.value()));
4428           Alignment = llvm::PowerOf2Floor(gcd);
4429         } else {
4430           Alignment = V.getPointerAlignment(DL).value();
4431         }
4432         // Use only IR information if we did not strip anything.
4433         T.takeKnownMaximum(Alignment);
4434         T.indicatePessimisticFixpoint();
4435       } else {
4436         // Use abstract attribute information.
4437         const AAAlign::StateType &DS = AA.getState();
4438         T ^= DS;
4439       }
4440       return T.isValidState();
4441     };
4442 
4443     StateType T;
4444     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
4445                                           VisitValueCB, getCtxI()))
4446       return indicatePessimisticFixpoint();
4447 
4448     // TODO: If we know we visited all incoming values, thus no are assumed
4449     // dead, we can take the known information from the state T.
4450     return clampStateAndIndicateChange(getState(), T);
4451   }
4452 
4453   /// See AbstractAttribute::trackStatistics()
4454   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
4455 };
4456 
4457 /// Align attribute for function return value.
4458 struct AAAlignReturned final
4459     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
4460   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
4461   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4462 
4463   /// See AbstractAttribute::initialize(...).
4464   void initialize(Attributor &A) override {
4465     Base::initialize(A);
4466     Function *F = getAssociatedFunction();
4467     if (!F || F->isDeclaration())
4468       indicatePessimisticFixpoint();
4469   }
4470 
4471   /// See AbstractAttribute::trackStatistics()
4472   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4473 };
4474 
4475 /// Align attribute for function argument.
4476 struct AAAlignArgument final
4477     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4478   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4479   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4480 
4481   /// See AbstractAttribute::manifest(...).
4482   ChangeStatus manifest(Attributor &A) override {
4483     // If the associated argument is involved in a must-tail call we give up
4484     // because we would need to keep the argument alignments of caller and
4485     // callee in-sync. Just does not seem worth the trouble right now.
4486     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4487       return ChangeStatus::UNCHANGED;
4488     return Base::manifest(A);
4489   }
4490 
4491   /// See AbstractAttribute::trackStatistics()
4492   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4493 };
4494 
4495 struct AAAlignCallSiteArgument final : AAAlignFloating {
4496   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4497       : AAAlignFloating(IRP, A) {}
4498 
4499   /// See AbstractAttribute::manifest(...).
4500   ChangeStatus manifest(Attributor &A) override {
4501     // If the associated argument is involved in a must-tail call we give up
4502     // because we would need to keep the argument alignments of caller and
4503     // callee in-sync. Just does not seem worth the trouble right now.
4504     if (Argument *Arg = getAssociatedArgument())
4505       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4506         return ChangeStatus::UNCHANGED;
4507     ChangeStatus Changed = AAAlignImpl::manifest(A);
4508     Align InheritAlign =
4509         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4510     if (InheritAlign >= getAssumedAlign())
4511       Changed = ChangeStatus::UNCHANGED;
4512     return Changed;
4513   }
4514 
4515   /// See AbstractAttribute::updateImpl(Attributor &A).
4516   ChangeStatus updateImpl(Attributor &A) override {
4517     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4518     if (Argument *Arg = getAssociatedArgument()) {
4519       // We only take known information from the argument
4520       // so we do not need to track a dependence.
4521       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4522           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4523       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4524     }
4525     return Changed;
4526   }
4527 
4528   /// See AbstractAttribute::trackStatistics()
4529   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4530 };
4531 
4532 /// Align attribute deduction for a call site return value.
4533 struct AAAlignCallSiteReturned final
4534     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4535   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4536   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4537       : Base(IRP, A) {}
4538 
4539   /// See AbstractAttribute::initialize(...).
4540   void initialize(Attributor &A) override {
4541     Base::initialize(A);
4542     Function *F = getAssociatedFunction();
4543     if (!F || F->isDeclaration())
4544       indicatePessimisticFixpoint();
4545   }
4546 
4547   /// See AbstractAttribute::trackStatistics()
4548   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4549 };
4550 
4551 /// ------------------ Function No-Return Attribute ----------------------------
4552 struct AANoReturnImpl : public AANoReturn {
4553   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4554 
4555   /// See AbstractAttribute::initialize(...).
4556   void initialize(Attributor &A) override {
4557     AANoReturn::initialize(A);
4558     Function *F = getAssociatedFunction();
4559     if (!F || F->isDeclaration())
4560       indicatePessimisticFixpoint();
4561   }
4562 
4563   /// See AbstractAttribute::getAsStr().
4564   const std::string getAsStr() const override {
4565     return getAssumed() ? "noreturn" : "may-return";
4566   }
4567 
4568   /// See AbstractAttribute::updateImpl(Attributor &A).
4569   virtual ChangeStatus updateImpl(Attributor &A) override {
4570     auto CheckForNoReturn = [](Instruction &) { return false; };
4571     bool UsedAssumedInformation = false;
4572     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4573                                    {(unsigned)Instruction::Ret},
4574                                    UsedAssumedInformation))
4575       return indicatePessimisticFixpoint();
4576     return ChangeStatus::UNCHANGED;
4577   }
4578 };
4579 
4580 struct AANoReturnFunction final : AANoReturnImpl {
4581   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4582       : AANoReturnImpl(IRP, A) {}
4583 
4584   /// See AbstractAttribute::trackStatistics()
4585   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4586 };
4587 
4588 /// NoReturn attribute deduction for a call sites.
4589 struct AANoReturnCallSite final : AANoReturnImpl {
4590   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4591       : AANoReturnImpl(IRP, A) {}
4592 
4593   /// See AbstractAttribute::initialize(...).
4594   void initialize(Attributor &A) override {
4595     AANoReturnImpl::initialize(A);
4596     if (Function *F = getAssociatedFunction()) {
4597       const IRPosition &FnPos = IRPosition::function(*F);
4598       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4599       if (!FnAA.isAssumedNoReturn())
4600         indicatePessimisticFixpoint();
4601     }
4602   }
4603 
4604   /// See AbstractAttribute::updateImpl(...).
4605   ChangeStatus updateImpl(Attributor &A) override {
4606     // TODO: Once we have call site specific value information we can provide
4607     //       call site specific liveness information and then it makes
4608     //       sense to specialize attributes for call sites arguments instead of
4609     //       redirecting requests to the callee argument.
4610     Function *F = getAssociatedFunction();
4611     const IRPosition &FnPos = IRPosition::function(*F);
4612     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4613     return clampStateAndIndicateChange(getState(), FnAA.getState());
4614   }
4615 
4616   /// See AbstractAttribute::trackStatistics()
4617   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4618 };
4619 
4620 /// ----------------------- Variable Capturing ---------------------------------
4621 
4622 /// A class to hold the state of for no-capture attributes.
4623 struct AANoCaptureImpl : public AANoCapture {
4624   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4625 
4626   /// See AbstractAttribute::initialize(...).
4627   void initialize(Attributor &A) override {
4628     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4629       indicateOptimisticFixpoint();
4630       return;
4631     }
4632     Function *AnchorScope = getAnchorScope();
4633     if (isFnInterfaceKind() &&
4634         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4635       indicatePessimisticFixpoint();
4636       return;
4637     }
4638 
4639     // You cannot "capture" null in the default address space.
4640     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4641         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4642       indicateOptimisticFixpoint();
4643       return;
4644     }
4645 
4646     const Function *F =
4647         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4648 
4649     // Check what state the associated function can actually capture.
4650     if (F)
4651       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4652     else
4653       indicatePessimisticFixpoint();
4654   }
4655 
4656   /// See AbstractAttribute::updateImpl(...).
4657   ChangeStatus updateImpl(Attributor &A) override;
4658 
4659   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4660   virtual void
4661   getDeducedAttributes(LLVMContext &Ctx,
4662                        SmallVectorImpl<Attribute> &Attrs) const override {
4663     if (!isAssumedNoCaptureMaybeReturned())
4664       return;
4665 
4666     if (isArgumentPosition()) {
4667       if (isAssumedNoCapture())
4668         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4669       else if (ManifestInternal)
4670         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4671     }
4672   }
4673 
4674   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4675   /// depending on the ability of the function associated with \p IRP to capture
4676   /// state in memory and through "returning/throwing", respectively.
4677   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4678                                                    const Function &F,
4679                                                    BitIntegerState &State) {
4680     // TODO: Once we have memory behavior attributes we should use them here.
4681 
4682     // If we know we cannot communicate or write to memory, we do not care about
4683     // ptr2int anymore.
4684     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4685         F.getReturnType()->isVoidTy()) {
4686       State.addKnownBits(NO_CAPTURE);
4687       return;
4688     }
4689 
4690     // A function cannot capture state in memory if it only reads memory, it can
4691     // however return/throw state and the state might be influenced by the
4692     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4693     if (F.onlyReadsMemory())
4694       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4695 
4696     // A function cannot communicate state back if it does not through
4697     // exceptions and doesn not return values.
4698     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4699       State.addKnownBits(NOT_CAPTURED_IN_RET);
4700 
4701     // Check existing "returned" attributes.
4702     int ArgNo = IRP.getCalleeArgNo();
4703     if (F.doesNotThrow() && ArgNo >= 0) {
4704       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4705         if (F.hasParamAttribute(u, Attribute::Returned)) {
4706           if (u == unsigned(ArgNo))
4707             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4708           else if (F.onlyReadsMemory())
4709             State.addKnownBits(NO_CAPTURE);
4710           else
4711             State.addKnownBits(NOT_CAPTURED_IN_RET);
4712           break;
4713         }
4714     }
4715   }
4716 
4717   /// See AbstractState::getAsStr().
4718   const std::string getAsStr() const override {
4719     if (isKnownNoCapture())
4720       return "known not-captured";
4721     if (isAssumedNoCapture())
4722       return "assumed not-captured";
4723     if (isKnownNoCaptureMaybeReturned())
4724       return "known not-captured-maybe-returned";
4725     if (isAssumedNoCaptureMaybeReturned())
4726       return "assumed not-captured-maybe-returned";
4727     return "assumed-captured";
4728   }
4729 };
4730 
4731 /// Attributor-aware capture tracker.
4732 struct AACaptureUseTracker final : public CaptureTracker {
4733 
4734   /// Create a capture tracker that can lookup in-flight abstract attributes
4735   /// through the Attributor \p A.
4736   ///
4737   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4738   /// search is stopped. If a use leads to a return instruction,
4739   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4740   /// If a use leads to a ptr2int which may capture the value,
4741   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4742   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4743   /// set. All values in \p PotentialCopies are later tracked as well. For every
4744   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4745   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4746   /// conservatively set to true.
4747   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4748                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4749                       SmallSetVector<Value *, 4> &PotentialCopies,
4750                       unsigned &RemainingUsesToExplore)
4751       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4752         PotentialCopies(PotentialCopies),
4753         RemainingUsesToExplore(RemainingUsesToExplore) {}
4754 
4755   /// Determine if \p V maybe captured. *Also updates the state!*
4756   bool valueMayBeCaptured(const Value *V) {
4757     if (V->getType()->isPointerTy()) {
4758       PointerMayBeCaptured(V, this);
4759     } else {
4760       State.indicatePessimisticFixpoint();
4761     }
4762     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4763   }
4764 
4765   /// See CaptureTracker::tooManyUses().
4766   void tooManyUses() override {
4767     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4768   }
4769 
4770   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4771     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4772       return true;
4773     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4774         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4775     return DerefAA.getAssumedDereferenceableBytes();
4776   }
4777 
4778   /// See CaptureTracker::captured(...).
4779   bool captured(const Use *U) override {
4780     Instruction *UInst = cast<Instruction>(U->getUser());
4781     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4782                       << "\n");
4783 
4784     // Because we may reuse the tracker multiple times we keep track of the
4785     // number of explored uses ourselves as well.
4786     if (RemainingUsesToExplore-- == 0) {
4787       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4788       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4789                           /* Return */ true);
4790     }
4791 
4792     // Deal with ptr2int by following uses.
4793     if (isa<PtrToIntInst>(UInst)) {
4794       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4795       return valueMayBeCaptured(UInst);
4796     }
4797 
4798     // For stores we check if we can follow the value through memory or not.
4799     if (auto *SI = dyn_cast<StoreInst>(UInst)) {
4800       if (SI->isVolatile())
4801         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4802                             /* Return */ false);
4803       bool UsedAssumedInformation = false;
4804       if (!AA::getPotentialCopiesOfStoredValue(
4805               A, *SI, PotentialCopies, NoCaptureAA, UsedAssumedInformation))
4806         return isCapturedIn(/* Memory */ true, /* Integer */ false,
4807                             /* Return */ false);
4808       // Not captured directly, potential copies will be checked.
4809       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4810                           /* Return */ false);
4811     }
4812 
4813     // Explicitly catch return instructions.
4814     if (isa<ReturnInst>(UInst)) {
4815       if (UInst->getFunction() == NoCaptureAA.getAnchorScope())
4816         return isCapturedIn(/* Memory */ false, /* Integer */ false,
4817                             /* Return */ true);
4818       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4819                           /* Return */ true);
4820     }
4821 
4822     // For now we only use special logic for call sites. However, the tracker
4823     // itself knows about a lot of other non-capturing cases already.
4824     auto *CB = dyn_cast<CallBase>(UInst);
4825     if (!CB || !CB->isArgOperand(U))
4826       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4827                           /* Return */ true);
4828 
4829     unsigned ArgNo = CB->getArgOperandNo(U);
4830     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4831     // If we have a abstract no-capture attribute for the argument we can use
4832     // it to justify a non-capture attribute here. This allows recursion!
4833     auto &ArgNoCaptureAA =
4834         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4835     if (ArgNoCaptureAA.isAssumedNoCapture())
4836       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4837                           /* Return */ false);
4838     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4839       addPotentialCopy(*CB);
4840       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4841                           /* Return */ false);
4842     }
4843 
4844     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4845     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4846                         /* Return */ true);
4847   }
4848 
4849   /// Register \p CS as potential copy of the value we are checking.
4850   void addPotentialCopy(CallBase &CB) { PotentialCopies.insert(&CB); }
4851 
4852   /// See CaptureTracker::shouldExplore(...).
4853   bool shouldExplore(const Use *U) override {
4854     // Check liveness and ignore droppable users.
4855     bool UsedAssumedInformation = false;
4856     return !U->getUser()->isDroppable() &&
4857            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA,
4858                             UsedAssumedInformation);
4859   }
4860 
4861   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4862   /// \p CapturedInRet, then return the appropriate value for use in the
4863   /// CaptureTracker::captured() interface.
4864   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4865                     bool CapturedInRet) {
4866     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4867                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4868     if (CapturedInMem)
4869       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4870     if (CapturedInInt)
4871       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4872     if (CapturedInRet)
4873       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4874     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4875   }
4876 
4877 private:
4878   /// The attributor providing in-flight abstract attributes.
4879   Attributor &A;
4880 
4881   /// The abstract attribute currently updated.
4882   AANoCapture &NoCaptureAA;
4883 
4884   /// The abstract liveness state.
4885   const AAIsDead &IsDeadAA;
4886 
4887   /// The state currently updated.
4888   AANoCapture::StateType &State;
4889 
4890   /// Set of potential copies of the tracked value.
4891   SmallSetVector<Value *, 4> &PotentialCopies;
4892 
4893   /// Global counter to limit the number of explored uses.
4894   unsigned &RemainingUsesToExplore;
4895 };
4896 
4897 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4898   const IRPosition &IRP = getIRPosition();
4899   Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4900                                   : &IRP.getAssociatedValue();
4901   if (!V)
4902     return indicatePessimisticFixpoint();
4903 
4904   const Function *F =
4905       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4906   assert(F && "Expected a function!");
4907   const IRPosition &FnPos = IRPosition::function(*F);
4908   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4909 
4910   AANoCapture::StateType T;
4911 
4912   // Readonly means we cannot capture through memory.
4913   const auto &FnMemAA =
4914       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4915   if (FnMemAA.isAssumedReadOnly()) {
4916     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4917     if (FnMemAA.isKnownReadOnly())
4918       addKnownBits(NOT_CAPTURED_IN_MEM);
4919     else
4920       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4921   }
4922 
4923   // Make sure all returned values are different than the underlying value.
4924   // TODO: we could do this in a more sophisticated way inside
4925   //       AAReturnedValues, e.g., track all values that escape through returns
4926   //       directly somehow.
4927   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4928     bool SeenConstant = false;
4929     for (auto &It : RVAA.returned_values()) {
4930       if (isa<Constant>(It.first)) {
4931         if (SeenConstant)
4932           return false;
4933         SeenConstant = true;
4934       } else if (!isa<Argument>(It.first) ||
4935                  It.first == getAssociatedArgument())
4936         return false;
4937     }
4938     return true;
4939   };
4940 
4941   const auto &NoUnwindAA =
4942       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4943   if (NoUnwindAA.isAssumedNoUnwind()) {
4944     bool IsVoidTy = F->getReturnType()->isVoidTy();
4945     const AAReturnedValues *RVAA =
4946         IsVoidTy ? nullptr
4947                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4948 
4949                                                  DepClassTy::OPTIONAL);
4950     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4951       T.addKnownBits(NOT_CAPTURED_IN_RET);
4952       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4953         return ChangeStatus::UNCHANGED;
4954       if (NoUnwindAA.isKnownNoUnwind() &&
4955           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4956         addKnownBits(NOT_CAPTURED_IN_RET);
4957         if (isKnown(NOT_CAPTURED_IN_MEM))
4958           return indicateOptimisticFixpoint();
4959       }
4960     }
4961   }
4962 
4963   // Use the CaptureTracker interface and logic with the specialized tracker,
4964   // defined in AACaptureUseTracker, that can look at in-flight abstract
4965   // attributes and directly updates the assumed state.
4966   SmallSetVector<Value *, 4> PotentialCopies;
4967   unsigned RemainingUsesToExplore =
4968       getDefaultMaxUsesToExploreForCaptureTracking();
4969   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4970                               RemainingUsesToExplore);
4971 
4972   // Check all potential copies of the associated value until we can assume
4973   // none will be captured or we have to assume at least one might be.
4974   unsigned Idx = 0;
4975   PotentialCopies.insert(V);
4976   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4977     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4978 
4979   AANoCapture::StateType &S = getState();
4980   auto Assumed = S.getAssumed();
4981   S.intersectAssumedBits(T.getAssumed());
4982   if (!isAssumedNoCaptureMaybeReturned())
4983     return indicatePessimisticFixpoint();
4984   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4985                                    : ChangeStatus::CHANGED;
4986 }
4987 
4988 /// NoCapture attribute for function arguments.
4989 struct AANoCaptureArgument final : AANoCaptureImpl {
4990   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4991       : AANoCaptureImpl(IRP, A) {}
4992 
4993   /// See AbstractAttribute::trackStatistics()
4994   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4995 };
4996 
4997 /// NoCapture attribute for call site arguments.
4998 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4999   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
5000       : AANoCaptureImpl(IRP, A) {}
5001 
5002   /// See AbstractAttribute::initialize(...).
5003   void initialize(Attributor &A) override {
5004     if (Argument *Arg = getAssociatedArgument())
5005       if (Arg->hasByValAttr())
5006         indicateOptimisticFixpoint();
5007     AANoCaptureImpl::initialize(A);
5008   }
5009 
5010   /// See AbstractAttribute::updateImpl(...).
5011   ChangeStatus updateImpl(Attributor &A) override {
5012     // TODO: Once we have call site specific value information we can provide
5013     //       call site specific liveness information and then it makes
5014     //       sense to specialize attributes for call sites arguments instead of
5015     //       redirecting requests to the callee argument.
5016     Argument *Arg = getAssociatedArgument();
5017     if (!Arg)
5018       return indicatePessimisticFixpoint();
5019     const IRPosition &ArgPos = IRPosition::argument(*Arg);
5020     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
5021     return clampStateAndIndicateChange(getState(), ArgAA.getState());
5022   }
5023 
5024   /// See AbstractAttribute::trackStatistics()
5025   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
5026 };
5027 
5028 /// NoCapture attribute for floating values.
5029 struct AANoCaptureFloating final : AANoCaptureImpl {
5030   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
5031       : AANoCaptureImpl(IRP, A) {}
5032 
5033   /// See AbstractAttribute::trackStatistics()
5034   void trackStatistics() const override {
5035     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
5036   }
5037 };
5038 
5039 /// NoCapture attribute for function return value.
5040 struct AANoCaptureReturned final : AANoCaptureImpl {
5041   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
5042       : AANoCaptureImpl(IRP, A) {
5043     llvm_unreachable("NoCapture is not applicable to function returns!");
5044   }
5045 
5046   /// See AbstractAttribute::initialize(...).
5047   void initialize(Attributor &A) override {
5048     llvm_unreachable("NoCapture is not applicable to function returns!");
5049   }
5050 
5051   /// See AbstractAttribute::updateImpl(...).
5052   ChangeStatus updateImpl(Attributor &A) override {
5053     llvm_unreachable("NoCapture is not applicable to function returns!");
5054   }
5055 
5056   /// See AbstractAttribute::trackStatistics()
5057   void trackStatistics() const override {}
5058 };
5059 
5060 /// NoCapture attribute deduction for a call site return value.
5061 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
5062   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
5063       : AANoCaptureImpl(IRP, A) {}
5064 
5065   /// See AbstractAttribute::initialize(...).
5066   void initialize(Attributor &A) override {
5067     const Function *F = getAnchorScope();
5068     // Check what state the associated function can actually capture.
5069     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
5070   }
5071 
5072   /// See AbstractAttribute::trackStatistics()
5073   void trackStatistics() const override {
5074     STATS_DECLTRACK_CSRET_ATTR(nocapture)
5075   }
5076 };
5077 
5078 /// ------------------ Value Simplify Attribute ----------------------------
5079 
5080 bool ValueSimplifyStateType::unionAssumed(Optional<Value *> Other) {
5081   // FIXME: Add a typecast support.
5082   SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5083       SimplifiedAssociatedValue, Other, Ty);
5084   if (SimplifiedAssociatedValue == Optional<Value *>(nullptr))
5085     return false;
5086 
5087   LLVM_DEBUG({
5088     if (SimplifiedAssociatedValue.hasValue())
5089       dbgs() << "[ValueSimplify] is assumed to be "
5090              << **SimplifiedAssociatedValue << "\n";
5091     else
5092       dbgs() << "[ValueSimplify] is assumed to be <none>\n";
5093   });
5094   return true;
5095 }
5096 
5097 struct AAValueSimplifyImpl : AAValueSimplify {
5098   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
5099       : AAValueSimplify(IRP, A) {}
5100 
5101   /// See AbstractAttribute::initialize(...).
5102   void initialize(Attributor &A) override {
5103     if (getAssociatedValue().getType()->isVoidTy())
5104       indicatePessimisticFixpoint();
5105     if (A.hasSimplificationCallback(getIRPosition()))
5106       indicatePessimisticFixpoint();
5107   }
5108 
5109   /// See AbstractAttribute::getAsStr().
5110   const std::string getAsStr() const override {
5111     LLVM_DEBUG({
5112       errs() << "SAV: " << SimplifiedAssociatedValue << " ";
5113       if (SimplifiedAssociatedValue && *SimplifiedAssociatedValue)
5114         errs() << "SAV: " << **SimplifiedAssociatedValue << " ";
5115     });
5116     return isValidState() ? (isAtFixpoint() ? "simplified" : "maybe-simple")
5117                           : "not-simple";
5118   }
5119 
5120   /// See AbstractAttribute::trackStatistics()
5121   void trackStatistics() const override {}
5122 
5123   /// See AAValueSimplify::getAssumedSimplifiedValue()
5124   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5125     return SimplifiedAssociatedValue;
5126   }
5127 
5128   /// Return a value we can use as replacement for the associated one, or
5129   /// nullptr if we don't have one that makes sense.
5130   Value *getReplacementValue(Attributor &A) const {
5131     Value *NewV;
5132     NewV = SimplifiedAssociatedValue.hasValue()
5133                ? SimplifiedAssociatedValue.getValue()
5134                : UndefValue::get(getAssociatedType());
5135     if (!NewV)
5136       return nullptr;
5137     NewV = AA::getWithType(*NewV, *getAssociatedType());
5138     if (!NewV || NewV == &getAssociatedValue())
5139       return nullptr;
5140     const Instruction *CtxI = getCtxI();
5141     if (CtxI && !AA::isValidAtPosition(*NewV, *CtxI, A.getInfoCache()))
5142       return nullptr;
5143     if (!CtxI && !AA::isValidInScope(*NewV, getAnchorScope()))
5144       return nullptr;
5145     return NewV;
5146   }
5147 
5148   /// Helper function for querying AAValueSimplify and updating candicate.
5149   /// \param IRP The value position we are trying to unify with SimplifiedValue
5150   bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
5151                       const IRPosition &IRP, bool Simplify = true) {
5152     bool UsedAssumedInformation = false;
5153     Optional<Value *> QueryingValueSimplified = &IRP.getAssociatedValue();
5154     if (Simplify)
5155       QueryingValueSimplified =
5156           A.getAssumedSimplified(IRP, QueryingAA, UsedAssumedInformation);
5157     return unionAssumed(QueryingValueSimplified);
5158   }
5159 
5160   /// Returns a candidate is found or not
5161   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
5162     if (!getAssociatedValue().getType()->isIntegerTy())
5163       return false;
5164 
5165     // This will also pass the call base context.
5166     const auto &AA =
5167         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
5168 
5169     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
5170 
5171     if (!COpt.hasValue()) {
5172       SimplifiedAssociatedValue = llvm::None;
5173       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5174       return true;
5175     }
5176     if (auto *C = COpt.getValue()) {
5177       SimplifiedAssociatedValue = C;
5178       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
5179       return true;
5180     }
5181     return false;
5182   }
5183 
5184   bool askSimplifiedValueForOtherAAs(Attributor &A) {
5185     if (askSimplifiedValueFor<AAValueConstantRange>(A))
5186       return true;
5187     if (askSimplifiedValueFor<AAPotentialValues>(A))
5188       return true;
5189     return false;
5190   }
5191 
5192   /// See AbstractAttribute::manifest(...).
5193   ChangeStatus manifest(Attributor &A) override {
5194     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5195     if (getAssociatedValue().user_empty())
5196       return Changed;
5197 
5198     if (auto *NewV = getReplacementValue(A)) {
5199       LLVM_DEBUG(dbgs() << "[ValueSimplify] " << getAssociatedValue() << " -> "
5200                         << *NewV << " :: " << *this << "\n");
5201       if (A.changeValueAfterManifest(getAssociatedValue(), *NewV))
5202         Changed = ChangeStatus::CHANGED;
5203     }
5204 
5205     return Changed | AAValueSimplify::manifest(A);
5206   }
5207 
5208   /// See AbstractState::indicatePessimisticFixpoint(...).
5209   ChangeStatus indicatePessimisticFixpoint() override {
5210     SimplifiedAssociatedValue = &getAssociatedValue();
5211     return AAValueSimplify::indicatePessimisticFixpoint();
5212   }
5213 
5214   static bool handleLoad(Attributor &A, const AbstractAttribute &AA,
5215                          LoadInst &L, function_ref<bool(Value &)> Union) {
5216     auto UnionWrapper = [&](Value &V, Value &Obj) {
5217       if (isa<AllocaInst>(Obj))
5218         return Union(V);
5219       if (!AA::isDynamicallyUnique(A, AA, V))
5220         return false;
5221       if (!AA::isValidAtPosition(V, L, A.getInfoCache()))
5222         return false;
5223       return Union(V);
5224     };
5225 
5226     Value &Ptr = *L.getPointerOperand();
5227     SmallVector<Value *, 8> Objects;
5228     if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, AA, &L))
5229       return false;
5230 
5231     for (Value *Obj : Objects) {
5232       LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
5233       if (isa<UndefValue>(Obj))
5234         continue;
5235       if (isa<ConstantPointerNull>(Obj)) {
5236         // A null pointer access can be undefined but any offset from null may
5237         // be OK. We do not try to optimize the latter.
5238         bool UsedAssumedInformation = false;
5239         if (!NullPointerIsDefined(L.getFunction(),
5240                                   Ptr.getType()->getPointerAddressSpace()) &&
5241             A.getAssumedSimplified(Ptr, AA, UsedAssumedInformation) == Obj)
5242           continue;
5243         return false;
5244       }
5245       if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj))
5246         return false;
5247       Constant *InitialVal = AA::getInitialValueForObj(*Obj, *L.getType());
5248       if (!InitialVal || !Union(*InitialVal))
5249         return false;
5250 
5251       LLVM_DEBUG(dbgs() << "Underlying object amenable to load-store "
5252                            "propagation, checking accesses next.\n");
5253 
5254       auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
5255         LLVM_DEBUG(dbgs() << " - visit access " << Acc << "\n");
5256         if (!Acc.isWrite())
5257           return true;
5258         if (Acc.isWrittenValueYetUndetermined())
5259           return true;
5260         Value *Content = Acc.getWrittenValue();
5261         if (!Content)
5262           return false;
5263         Value *CastedContent =
5264             AA::getWithType(*Content, *AA.getAssociatedType());
5265         if (!CastedContent)
5266           return false;
5267         if (IsExact)
5268           return UnionWrapper(*CastedContent, *Obj);
5269         if (auto *C = dyn_cast<Constant>(CastedContent))
5270           if (C->isNullValue() || C->isAllOnesValue() || isa<UndefValue>(C))
5271             return UnionWrapper(*CastedContent, *Obj);
5272         return false;
5273       };
5274 
5275       auto &PI = A.getAAFor<AAPointerInfo>(AA, IRPosition::value(*Obj),
5276                                            DepClassTy::REQUIRED);
5277       if (!PI.forallInterferingAccesses(L, CheckAccess))
5278         return false;
5279     }
5280     return true;
5281   }
5282 };
5283 
5284 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
5285   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
5286       : AAValueSimplifyImpl(IRP, A) {}
5287 
5288   void initialize(Attributor &A) override {
5289     AAValueSimplifyImpl::initialize(A);
5290     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
5291       indicatePessimisticFixpoint();
5292     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
5293                  Attribute::StructRet, Attribute::Nest, Attribute::ByVal},
5294                 /* IgnoreSubsumingPositions */ true))
5295       indicatePessimisticFixpoint();
5296 
5297     // FIXME: This is a hack to prevent us from propagating function poiner in
5298     // the new pass manager CGSCC pass as it creates call edges the
5299     // CallGraphUpdater cannot handle yet.
5300     Value &V = getAssociatedValue();
5301     if (V.getType()->isPointerTy() &&
5302         V.getType()->getPointerElementType()->isFunctionTy() &&
5303         !A.isModulePass())
5304       indicatePessimisticFixpoint();
5305   }
5306 
5307   /// See AbstractAttribute::updateImpl(...).
5308   ChangeStatus updateImpl(Attributor &A) override {
5309     // Byval is only replacable if it is readonly otherwise we would write into
5310     // the replaced value and not the copy that byval creates implicitly.
5311     Argument *Arg = getAssociatedArgument();
5312     if (Arg->hasByValAttr()) {
5313       // TODO: We probably need to verify synchronization is not an issue, e.g.,
5314       //       there is no race by not copying a constant byval.
5315       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
5316                                                        DepClassTy::REQUIRED);
5317       if (!MemAA.isAssumedReadOnly())
5318         return indicatePessimisticFixpoint();
5319     }
5320 
5321     auto Before = SimplifiedAssociatedValue;
5322 
5323     auto PredForCallSite = [&](AbstractCallSite ACS) {
5324       const IRPosition &ACSArgPos =
5325           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
5326       // Check if a coresponding argument was found or if it is on not
5327       // associated (which can happen for callback calls).
5328       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5329         return false;
5330 
5331       // Simplify the argument operand explicitly and check if the result is
5332       // valid in the current scope. This avoids refering to simplified values
5333       // in other functions, e.g., we don't want to say a an argument in a
5334       // static function is actually an argument in a different function.
5335       bool UsedAssumedInformation = false;
5336       Optional<Constant *> SimpleArgOp =
5337           A.getAssumedConstant(ACSArgPos, *this, UsedAssumedInformation);
5338       if (!SimpleArgOp.hasValue())
5339         return true;
5340       if (!SimpleArgOp.getValue())
5341         return false;
5342       if (!AA::isDynamicallyUnique(A, *this, **SimpleArgOp))
5343         return false;
5344       return unionAssumed(*SimpleArgOp);
5345     };
5346 
5347     // Generate a answer specific to a call site context.
5348     bool Success;
5349     bool AllCallSitesKnown;
5350     if (hasCallBaseContext() &&
5351         getCallBaseContext()->getCalledFunction() == Arg->getParent())
5352       Success = PredForCallSite(
5353           AbstractCallSite(&getCallBaseContext()->getCalledOperandUse()));
5354     else
5355       Success = A.checkForAllCallSites(PredForCallSite, *this, true,
5356                                        AllCallSitesKnown);
5357 
5358     if (!Success)
5359       if (!askSimplifiedValueForOtherAAs(A))
5360         return indicatePessimisticFixpoint();
5361 
5362     // If a candicate was found in this update, return CHANGED.
5363     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5364                                                : ChangeStatus ::CHANGED;
5365   }
5366 
5367   /// See AbstractAttribute::trackStatistics()
5368   void trackStatistics() const override {
5369     STATS_DECLTRACK_ARG_ATTR(value_simplify)
5370   }
5371 };
5372 
5373 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
5374   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
5375       : AAValueSimplifyImpl(IRP, A) {}
5376 
5377   /// See AAValueSimplify::getAssumedSimplifiedValue()
5378   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
5379     if (!isValidState())
5380       return nullptr;
5381     return SimplifiedAssociatedValue;
5382   }
5383 
5384   /// See AbstractAttribute::updateImpl(...).
5385   ChangeStatus updateImpl(Attributor &A) override {
5386     auto Before = SimplifiedAssociatedValue;
5387 
5388     auto PredForReturned = [&](Value &V) {
5389       return checkAndUpdate(A, *this,
5390                             IRPosition::value(V, getCallBaseContext()));
5391     };
5392 
5393     if (!A.checkForAllReturnedValues(PredForReturned, *this))
5394       if (!askSimplifiedValueForOtherAAs(A))
5395         return indicatePessimisticFixpoint();
5396 
5397     // If a candicate was found in this update, return CHANGED.
5398     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5399                                                : ChangeStatus ::CHANGED;
5400   }
5401 
5402   ChangeStatus manifest(Attributor &A) override {
5403     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5404 
5405     if (auto *NewV = getReplacementValue(A)) {
5406       auto PredForReturned =
5407           [&](Value &, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5408             for (ReturnInst *RI : RetInsts) {
5409               Value *ReturnedVal = RI->getReturnValue();
5410               if (ReturnedVal == NewV || isa<UndefValue>(ReturnedVal))
5411                 return true;
5412               assert(RI->getFunction() == getAnchorScope() &&
5413                      "ReturnInst in wrong function!");
5414               LLVM_DEBUG(dbgs()
5415                          << "[ValueSimplify] " << *ReturnedVal << " -> "
5416                          << *NewV << " in " << *RI << " :: " << *this << "\n");
5417               if (A.changeUseAfterManifest(RI->getOperandUse(0), *NewV))
5418                 Changed = ChangeStatus::CHANGED;
5419             }
5420             return true;
5421           };
5422       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
5423     }
5424 
5425     return Changed | AAValueSimplify::manifest(A);
5426   }
5427 
5428   /// See AbstractAttribute::trackStatistics()
5429   void trackStatistics() const override {
5430     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
5431   }
5432 };
5433 
5434 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
5435   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
5436       : AAValueSimplifyImpl(IRP, A) {}
5437 
5438   /// See AbstractAttribute::initialize(...).
5439   void initialize(Attributor &A) override {
5440     AAValueSimplifyImpl::initialize(A);
5441     Value &V = getAnchorValue();
5442 
5443     // TODO: add other stuffs
5444     if (isa<Constant>(V))
5445       indicatePessimisticFixpoint();
5446   }
5447 
5448   /// Check if \p Cmp is a comparison we can simplify.
5449   ///
5450   /// We handle multiple cases, one in which at least one operand is an
5451   /// (assumed) nullptr. If so, try to simplify it using AANonNull on the other
5452   /// operand. Return true if successful, in that case SimplifiedAssociatedValue
5453   /// will be updated.
5454   bool handleCmp(Attributor &A, CmpInst &Cmp) {
5455     auto Union = [&](Value &V) {
5456       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5457           SimplifiedAssociatedValue, &V, V.getType());
5458       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5459     };
5460 
5461     Value *LHS = Cmp.getOperand(0);
5462     Value *RHS = Cmp.getOperand(1);
5463 
5464     // Simplify the operands first.
5465     bool UsedAssumedInformation = false;
5466     const auto &SimplifiedLHS =
5467         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
5468                                *this, UsedAssumedInformation);
5469     if (!SimplifiedLHS.hasValue())
5470       return true;
5471     if (!SimplifiedLHS.getValue())
5472       return false;
5473     LHS = *SimplifiedLHS;
5474 
5475     const auto &SimplifiedRHS =
5476         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
5477                                *this, UsedAssumedInformation);
5478     if (!SimplifiedRHS.hasValue())
5479       return true;
5480     if (!SimplifiedRHS.getValue())
5481       return false;
5482     RHS = *SimplifiedRHS;
5483 
5484     LLVMContext &Ctx = Cmp.getContext();
5485     // Handle the trivial case first in which we don't even need to think about
5486     // null or non-null.
5487     if (LHS == RHS && (Cmp.isTrueWhenEqual() || Cmp.isFalseWhenEqual())) {
5488       Constant *NewVal =
5489           ConstantInt::get(Type::getInt1Ty(Ctx), Cmp.isTrueWhenEqual());
5490       if (!Union(*NewVal))
5491         return false;
5492       if (!UsedAssumedInformation)
5493         indicateOptimisticFixpoint();
5494       return true;
5495     }
5496 
5497     // From now on we only handle equalities (==, !=).
5498     ICmpInst *ICmp = dyn_cast<ICmpInst>(&Cmp);
5499     if (!ICmp || !ICmp->isEquality())
5500       return false;
5501 
5502     bool LHSIsNull = isa<ConstantPointerNull>(LHS);
5503     bool RHSIsNull = isa<ConstantPointerNull>(RHS);
5504     if (!LHSIsNull && !RHSIsNull)
5505       return false;
5506 
5507     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
5508     // non-nullptr operand and if we assume it's non-null we can conclude the
5509     // result of the comparison.
5510     assert((LHSIsNull || RHSIsNull) &&
5511            "Expected nullptr versus non-nullptr comparison at this point");
5512 
5513     // The index is the operand that we assume is not null.
5514     unsigned PtrIdx = LHSIsNull;
5515     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
5516         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
5517         DepClassTy::REQUIRED);
5518     if (!PtrNonNullAA.isAssumedNonNull())
5519       return false;
5520     UsedAssumedInformation |= !PtrNonNullAA.isKnownNonNull();
5521 
5522     // The new value depends on the predicate, true for != and false for ==.
5523     Constant *NewVal = ConstantInt::get(
5524         Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_NE);
5525     if (!Union(*NewVal))
5526       return false;
5527 
5528     if (!UsedAssumedInformation)
5529       indicateOptimisticFixpoint();
5530 
5531     return true;
5532   }
5533 
5534   bool updateWithLoad(Attributor &A, LoadInst &L) {
5535     auto Union = [&](Value &V) {
5536       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5537           SimplifiedAssociatedValue, &V, L.getType());
5538       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5539     };
5540     return handleLoad(A, *this, L, Union);
5541   }
5542 
5543   /// Use the generic, non-optimistic InstSimplfy functionality if we managed to
5544   /// simplify any operand of the instruction \p I. Return true if successful,
5545   /// in that case SimplifiedAssociatedValue will be updated.
5546   bool handleGenericInst(Attributor &A, Instruction &I) {
5547     bool SomeSimplified = false;
5548     bool UsedAssumedInformation = false;
5549 
5550     SmallVector<Value *, 8> NewOps(I.getNumOperands());
5551     int Idx = 0;
5552     for (Value *Op : I.operands()) {
5553       const auto &SimplifiedOp =
5554           A.getAssumedSimplified(IRPosition::value(*Op, getCallBaseContext()),
5555                                  *this, UsedAssumedInformation);
5556       // If we are not sure about any operand we are not sure about the entire
5557       // instruction, we'll wait.
5558       if (!SimplifiedOp.hasValue())
5559         return true;
5560 
5561       if (SimplifiedOp.getValue())
5562         NewOps[Idx] = SimplifiedOp.getValue();
5563       else
5564         NewOps[Idx] = Op;
5565 
5566       SomeSimplified |= (NewOps[Idx] != Op);
5567       ++Idx;
5568     }
5569 
5570     // We won't bother with the InstSimplify interface if we didn't simplify any
5571     // operand ourselves.
5572     if (!SomeSimplified)
5573       return false;
5574 
5575     InformationCache &InfoCache = A.getInfoCache();
5576     Function *F = I.getFunction();
5577     const auto *DT =
5578         InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
5579     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5580     auto *AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
5581     OptimizationRemarkEmitter *ORE = nullptr;
5582 
5583     const DataLayout &DL = I.getModule()->getDataLayout();
5584     SimplifyQuery Q(DL, TLI, DT, AC, &I);
5585     if (Value *SimplifiedI =
5586             SimplifyInstructionWithOperands(&I, NewOps, Q, ORE)) {
5587       SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5588           SimplifiedAssociatedValue, SimplifiedI, I.getType());
5589       return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5590     }
5591     return false;
5592   }
5593 
5594   /// See AbstractAttribute::updateImpl(...).
5595   ChangeStatus updateImpl(Attributor &A) override {
5596     auto Before = SimplifiedAssociatedValue;
5597 
5598     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
5599                             bool Stripped) -> bool {
5600       auto &AA = A.getAAFor<AAValueSimplify>(
5601           *this, IRPosition::value(V, getCallBaseContext()),
5602           DepClassTy::REQUIRED);
5603       if (!Stripped && this == &AA) {
5604 
5605         if (auto *I = dyn_cast<Instruction>(&V)) {
5606           if (auto *LI = dyn_cast<LoadInst>(&V))
5607             if (updateWithLoad(A, *LI))
5608               return true;
5609           if (auto *Cmp = dyn_cast<CmpInst>(&V))
5610             if (handleCmp(A, *Cmp))
5611               return true;
5612           if (handleGenericInst(A, *I))
5613             return true;
5614         }
5615         // TODO: Look the instruction and check recursively.
5616 
5617         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
5618                           << "\n");
5619         return false;
5620       }
5621       return checkAndUpdate(A, *this,
5622                             IRPosition::value(V, getCallBaseContext()));
5623     };
5624 
5625     bool Dummy = false;
5626     if (!genericValueTraversal<bool>(A, getIRPosition(), *this, Dummy,
5627                                      VisitValueCB, getCtxI(),
5628                                      /* UseValueSimplify */ false))
5629       if (!askSimplifiedValueForOtherAAs(A))
5630         return indicatePessimisticFixpoint();
5631 
5632     // If a candicate was found in this update, return CHANGED.
5633     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5634                                                : ChangeStatus ::CHANGED;
5635   }
5636 
5637   /// See AbstractAttribute::trackStatistics()
5638   void trackStatistics() const override {
5639     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
5640   }
5641 };
5642 
5643 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
5644   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
5645       : AAValueSimplifyImpl(IRP, A) {}
5646 
5647   /// See AbstractAttribute::initialize(...).
5648   void initialize(Attributor &A) override {
5649     SimplifiedAssociatedValue = nullptr;
5650     indicateOptimisticFixpoint();
5651   }
5652   /// See AbstractAttribute::initialize(...).
5653   ChangeStatus updateImpl(Attributor &A) override {
5654     llvm_unreachable(
5655         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
5656   }
5657   /// See AbstractAttribute::trackStatistics()
5658   void trackStatistics() const override {
5659     STATS_DECLTRACK_FN_ATTR(value_simplify)
5660   }
5661 };
5662 
5663 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5664   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5665       : AAValueSimplifyFunction(IRP, A) {}
5666   /// See AbstractAttribute::trackStatistics()
5667   void trackStatistics() const override {
5668     STATS_DECLTRACK_CS_ATTR(value_simplify)
5669   }
5670 };
5671 
5672 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyImpl {
5673   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5674       : AAValueSimplifyImpl(IRP, A) {}
5675 
5676   void initialize(Attributor &A) override {
5677     AAValueSimplifyImpl::initialize(A);
5678     if (!getAssociatedFunction())
5679       indicatePessimisticFixpoint();
5680   }
5681 
5682   /// See AbstractAttribute::updateImpl(...).
5683   ChangeStatus updateImpl(Attributor &A) override {
5684     auto Before = SimplifiedAssociatedValue;
5685     auto &RetAA = A.getAAFor<AAReturnedValues>(
5686         *this, IRPosition::function(*getAssociatedFunction()),
5687         DepClassTy::REQUIRED);
5688     auto PredForReturned =
5689         [&](Value &RetVal, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
5690           bool UsedAssumedInformation = false;
5691           Optional<Value *> CSRetVal = A.translateArgumentToCallSiteContent(
5692               &RetVal, *cast<CallBase>(getCtxI()), *this,
5693               UsedAssumedInformation);
5694           SimplifiedAssociatedValue = AA::combineOptionalValuesInAAValueLatice(
5695               SimplifiedAssociatedValue, CSRetVal, getAssociatedType());
5696           return SimplifiedAssociatedValue != Optional<Value *>(nullptr);
5697         };
5698     if (!RetAA.checkForAllReturnedValuesAndReturnInsts(PredForReturned))
5699       if (!askSimplifiedValueForOtherAAs(A))
5700         return indicatePessimisticFixpoint();
5701     return Before == SimplifiedAssociatedValue ? ChangeStatus::UNCHANGED
5702                                                : ChangeStatus ::CHANGED;
5703   }
5704 
5705   void trackStatistics() const override {
5706     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5707   }
5708 };
5709 
5710 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5711   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5712       : AAValueSimplifyFloating(IRP, A) {}
5713 
5714   /// See AbstractAttribute::manifest(...).
5715   ChangeStatus manifest(Attributor &A) override {
5716     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5717 
5718     if (auto *NewV = getReplacementValue(A)) {
5719       Use &U = cast<CallBase>(&getAnchorValue())
5720                    ->getArgOperandUse(getCallSiteArgNo());
5721       if (A.changeUseAfterManifest(U, *NewV))
5722         Changed = ChangeStatus::CHANGED;
5723     }
5724 
5725     return Changed | AAValueSimplify::manifest(A);
5726   }
5727 
5728   void trackStatistics() const override {
5729     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5730   }
5731 };
5732 
5733 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5734 struct AAHeapToStackFunction final : public AAHeapToStack {
5735 
5736   struct AllocationInfo {
5737     /// The call that allocates the memory.
5738     CallBase *const CB;
5739 
5740     /// The kind of allocation.
5741     const enum class AllocationKind {
5742       MALLOC,
5743       CALLOC,
5744       ALIGNED_ALLOC,
5745     } Kind;
5746 
5747     /// The library function id for the allocation.
5748     LibFunc LibraryFunctionId = NotLibFunc;
5749 
5750     /// The status wrt. a rewrite.
5751     enum {
5752       STACK_DUE_TO_USE,
5753       STACK_DUE_TO_FREE,
5754       INVALID,
5755     } Status = STACK_DUE_TO_USE;
5756 
5757     /// Flag to indicate if we encountered a use that might free this allocation
5758     /// but which is not in the deallocation infos.
5759     bool HasPotentiallyFreeingUnknownUses = false;
5760 
5761     /// The set of free calls that use this allocation.
5762     SmallPtrSet<CallBase *, 1> PotentialFreeCalls{};
5763   };
5764 
5765   struct DeallocationInfo {
5766     /// The call that deallocates the memory.
5767     CallBase *const CB;
5768 
5769     /// Flag to indicate if we don't know all objects this deallocation might
5770     /// free.
5771     bool MightFreeUnknownObjects = false;
5772 
5773     /// The set of allocation calls that are potentially freed.
5774     SmallPtrSet<CallBase *, 1> PotentialAllocationCalls{};
5775   };
5776 
5777   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5778       : AAHeapToStack(IRP, A) {}
5779 
5780   ~AAHeapToStackFunction() {
5781     // Ensure we call the destructor so we release any memory allocated in the
5782     // sets.
5783     for (auto &It : AllocationInfos)
5784       It.getSecond()->~AllocationInfo();
5785     for (auto &It : DeallocationInfos)
5786       It.getSecond()->~DeallocationInfo();
5787   }
5788 
5789   void initialize(Attributor &A) override {
5790     AAHeapToStack::initialize(A);
5791 
5792     const Function *F = getAnchorScope();
5793     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5794 
5795     auto AllocationIdentifierCB = [&](Instruction &I) {
5796       CallBase *CB = dyn_cast<CallBase>(&I);
5797       if (!CB)
5798         return true;
5799       if (isFreeCall(CB, TLI)) {
5800         DeallocationInfos[CB] = new (A.Allocator) DeallocationInfo{CB};
5801         return true;
5802       }
5803       bool IsMalloc = isMallocLikeFn(CB, TLI);
5804       bool IsAlignedAllocLike = !IsMalloc && isAlignedAllocLikeFn(CB, TLI);
5805       bool IsCalloc =
5806           !IsMalloc && !IsAlignedAllocLike && isCallocLikeFn(CB, TLI);
5807       if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc)
5808         return true;
5809       auto Kind =
5810           IsMalloc ? AllocationInfo::AllocationKind::MALLOC
5811                    : (IsCalloc ? AllocationInfo::AllocationKind::CALLOC
5812                                : AllocationInfo::AllocationKind::ALIGNED_ALLOC);
5813 
5814       AllocationInfo *AI = new (A.Allocator) AllocationInfo{CB, Kind};
5815       AllocationInfos[CB] = AI;
5816       TLI->getLibFunc(*CB, AI->LibraryFunctionId);
5817       return true;
5818     };
5819 
5820     bool UsedAssumedInformation = false;
5821     bool Success = A.checkForAllCallLikeInstructions(
5822         AllocationIdentifierCB, *this, UsedAssumedInformation,
5823         /* CheckBBLivenessOnly */ false,
5824         /* CheckPotentiallyDead */ true);
5825     (void)Success;
5826     assert(Success && "Did not expect the call base visit callback to fail!");
5827   }
5828 
5829   const std::string getAsStr() const override {
5830     unsigned NumH2SMallocs = 0, NumInvalidMallocs = 0;
5831     for (const auto &It : AllocationInfos) {
5832       if (It.second->Status == AllocationInfo::INVALID)
5833         ++NumInvalidMallocs;
5834       else
5835         ++NumH2SMallocs;
5836     }
5837     return "[H2S] Mallocs Good/Bad: " + std::to_string(NumH2SMallocs) + "/" +
5838            std::to_string(NumInvalidMallocs);
5839   }
5840 
5841   /// See AbstractAttribute::trackStatistics().
5842   void trackStatistics() const override {
5843     STATS_DECL(
5844         MallocCalls, Function,
5845         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5846     for (auto &It : AllocationInfos)
5847       if (It.second->Status != AllocationInfo::INVALID)
5848         ++BUILD_STAT_NAME(MallocCalls, Function);
5849   }
5850 
5851   bool isAssumedHeapToStack(const CallBase &CB) const override {
5852     if (isValidState())
5853       if (AllocationInfo *AI = AllocationInfos.lookup(&CB))
5854         return AI->Status != AllocationInfo::INVALID;
5855     return false;
5856   }
5857 
5858   bool isAssumedHeapToStackRemovedFree(CallBase &CB) const override {
5859     if (!isValidState())
5860       return false;
5861 
5862     for (auto &It : AllocationInfos) {
5863       AllocationInfo &AI = *It.second;
5864       if (AI.Status == AllocationInfo::INVALID)
5865         continue;
5866 
5867       if (AI.PotentialFreeCalls.count(&CB))
5868         return true;
5869     }
5870 
5871     return false;
5872   }
5873 
5874   ChangeStatus manifest(Attributor &A) override {
5875     assert(getState().isValidState() &&
5876            "Attempted to manifest an invalid state!");
5877 
5878     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5879     Function *F = getAnchorScope();
5880     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5881 
5882     for (auto &It : AllocationInfos) {
5883       AllocationInfo &AI = *It.second;
5884       if (AI.Status == AllocationInfo::INVALID)
5885         continue;
5886 
5887       for (CallBase *FreeCall : AI.PotentialFreeCalls) {
5888         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5889         A.deleteAfterManifest(*FreeCall);
5890         HasChanged = ChangeStatus::CHANGED;
5891       }
5892 
5893       LLVM_DEBUG(dbgs() << "H2S: Removing malloc-like call: " << *AI.CB
5894                         << "\n");
5895 
5896       auto Remark = [&](OptimizationRemark OR) {
5897         LibFunc IsAllocShared;
5898         if (TLI->getLibFunc(*AI.CB, IsAllocShared))
5899           if (IsAllocShared == LibFunc___kmpc_alloc_shared)
5900             return OR << "Moving globalized variable to the stack.";
5901         return OR << "Moving memory allocation from the heap to the stack.";
5902       };
5903       if (AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
5904         A.emitRemark<OptimizationRemark>(AI.CB, "OMP110", Remark);
5905       else
5906         A.emitRemark<OptimizationRemark>(AI.CB, "HeapToStack", Remark);
5907 
5908       Value *Size;
5909       Optional<APInt> SizeAPI = getSize(A, *this, AI);
5910       if (SizeAPI.hasValue()) {
5911         Size = ConstantInt::get(AI.CB->getContext(), *SizeAPI);
5912       } else if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5913         auto *Num = AI.CB->getOperand(0);
5914         auto *SizeT = AI.CB->getOperand(1);
5915         IRBuilder<> B(AI.CB);
5916         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5917       } else if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5918         Size = AI.CB->getOperand(1);
5919       } else {
5920         Size = AI.CB->getOperand(0);
5921       }
5922 
5923       Align Alignment(1);
5924       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC) {
5925         Optional<APInt> AlignmentAPI =
5926             getAPInt(A, *this, *AI.CB->getArgOperand(0));
5927         assert(AlignmentAPI.hasValue() &&
5928                "Expected an alignment during manifest!");
5929         Alignment =
5930             max(Alignment, MaybeAlign(AlignmentAPI.getValue().getZExtValue()));
5931       }
5932 
5933       unsigned AS = cast<PointerType>(AI.CB->getType())->getAddressSpace();
5934       Instruction *Alloca =
5935           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5936                          "", AI.CB->getNextNode());
5937 
5938       if (Alloca->getType() != AI.CB->getType())
5939         Alloca = new BitCastInst(Alloca, AI.CB->getType(), "malloc_bc",
5940                                  Alloca->getNextNode());
5941 
5942       A.changeValueAfterManifest(*AI.CB, *Alloca);
5943 
5944       if (auto *II = dyn_cast<InvokeInst>(AI.CB)) {
5945         auto *NBB = II->getNormalDest();
5946         BranchInst::Create(NBB, AI.CB->getParent());
5947         A.deleteAfterManifest(*AI.CB);
5948       } else {
5949         A.deleteAfterManifest(*AI.CB);
5950       }
5951 
5952       // Zero out the allocated memory if it was a calloc.
5953       if (AI.Kind == AllocationInfo::AllocationKind::CALLOC) {
5954         auto *BI = new BitCastInst(Alloca, AI.CB->getType(), "calloc_bc",
5955                                    Alloca->getNextNode());
5956         Value *Ops[] = {
5957             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5958             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5959 
5960         Type *Tys[] = {BI->getType(), AI.CB->getOperand(0)->getType()};
5961         Module *M = F->getParent();
5962         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5963         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5964       }
5965       HasChanged = ChangeStatus::CHANGED;
5966     }
5967 
5968     return HasChanged;
5969   }
5970 
5971   Optional<APInt> getAPInt(Attributor &A, const AbstractAttribute &AA,
5972                            Value &V) {
5973     bool UsedAssumedInformation = false;
5974     Optional<Constant *> SimpleV =
5975         A.getAssumedConstant(V, AA, UsedAssumedInformation);
5976     if (!SimpleV.hasValue())
5977       return APInt(64, 0);
5978     if (auto *CI = dyn_cast_or_null<ConstantInt>(SimpleV.getValue()))
5979       return CI->getValue();
5980     return llvm::None;
5981   }
5982 
5983   Optional<APInt> getSize(Attributor &A, const AbstractAttribute &AA,
5984                           AllocationInfo &AI) {
5985 
5986     if (AI.Kind == AllocationInfo::AllocationKind::MALLOC)
5987       return getAPInt(A, AA, *AI.CB->getArgOperand(0));
5988 
5989     if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
5990       // Only if the alignment is also constant we return a size.
5991       return getAPInt(A, AA, *AI.CB->getArgOperand(0)).hasValue()
5992                  ? getAPInt(A, AA, *AI.CB->getArgOperand(1))
5993                  : llvm::None;
5994 
5995     assert(AI.Kind == AllocationInfo::AllocationKind::CALLOC &&
5996            "Expected only callocs are left");
5997     Optional<APInt> Num = getAPInt(A, AA, *AI.CB->getArgOperand(0));
5998     Optional<APInt> Size = getAPInt(A, AA, *AI.CB->getArgOperand(1));
5999     if (!Num.hasValue() || !Size.hasValue())
6000       return llvm::None;
6001     bool Overflow = false;
6002     Size = Size.getValue().umul_ov(Num.getValue(), Overflow);
6003     return Overflow ? llvm::None : Size;
6004   }
6005 
6006   /// Collection of all malloc-like calls in a function with associated
6007   /// information.
6008   DenseMap<CallBase *, AllocationInfo *> AllocationInfos;
6009 
6010   /// Collection of all free-like calls in a function with associated
6011   /// information.
6012   DenseMap<CallBase *, DeallocationInfo *> DeallocationInfos;
6013 
6014   ChangeStatus updateImpl(Attributor &A) override;
6015 };
6016 
6017 ChangeStatus AAHeapToStackFunction::updateImpl(Attributor &A) {
6018   ChangeStatus Changed = ChangeStatus::UNCHANGED;
6019   const Function *F = getAnchorScope();
6020 
6021   const auto &LivenessAA =
6022       A.getAAFor<AAIsDead>(*this, IRPosition::function(*F), DepClassTy::NONE);
6023 
6024   MustBeExecutedContextExplorer &Explorer =
6025       A.getInfoCache().getMustBeExecutedContextExplorer();
6026 
6027   bool StackIsAccessibleByOtherThreads =
6028       A.getInfoCache().stackIsAccessibleByOtherThreads();
6029 
6030   // Flag to ensure we update our deallocation information at most once per
6031   // updateImpl call and only if we use the free check reasoning.
6032   bool HasUpdatedFrees = false;
6033 
6034   auto UpdateFrees = [&]() {
6035     HasUpdatedFrees = true;
6036 
6037     for (auto &It : DeallocationInfos) {
6038       DeallocationInfo &DI = *It.second;
6039       // For now we cannot use deallocations that have unknown inputs, skip
6040       // them.
6041       if (DI.MightFreeUnknownObjects)
6042         continue;
6043 
6044       // No need to analyze dead calls, ignore them instead.
6045       bool UsedAssumedInformation = false;
6046       if (A.isAssumedDead(*DI.CB, this, &LivenessAA, UsedAssumedInformation,
6047                           /* CheckBBLivenessOnly */ true))
6048         continue;
6049 
6050       // Use the optimistic version to get the freed objects, ignoring dead
6051       // branches etc.
6052       SmallVector<Value *, 8> Objects;
6053       if (!AA::getAssumedUnderlyingObjects(A, *DI.CB->getArgOperand(0), Objects,
6054                                            *this, DI.CB)) {
6055         LLVM_DEBUG(
6056             dbgs()
6057             << "[H2S] Unexpected failure in getAssumedUnderlyingObjects!\n");
6058         DI.MightFreeUnknownObjects = true;
6059         continue;
6060       }
6061 
6062       // Check each object explicitly.
6063       for (auto *Obj : Objects) {
6064         // Free of null and undef can be ignored as no-ops (or UB in the latter
6065         // case).
6066         if (isa<ConstantPointerNull>(Obj) || isa<UndefValue>(Obj))
6067           continue;
6068 
6069         CallBase *ObjCB = dyn_cast<CallBase>(Obj);
6070         if (!ObjCB) {
6071           LLVM_DEBUG(dbgs()
6072                      << "[H2S] Free of a non-call object: " << *Obj << "\n");
6073           DI.MightFreeUnknownObjects = true;
6074           continue;
6075         }
6076 
6077         AllocationInfo *AI = AllocationInfos.lookup(ObjCB);
6078         if (!AI) {
6079           LLVM_DEBUG(dbgs() << "[H2S] Free of a non-allocation object: " << *Obj
6080                             << "\n");
6081           DI.MightFreeUnknownObjects = true;
6082           continue;
6083         }
6084 
6085         DI.PotentialAllocationCalls.insert(ObjCB);
6086       }
6087     }
6088   };
6089 
6090   auto FreeCheck = [&](AllocationInfo &AI) {
6091     // If the stack is not accessible by other threads, the "must-free" logic
6092     // doesn't apply as the pointer could be shared and needs to be places in
6093     // "shareable" memory.
6094     if (!StackIsAccessibleByOtherThreads) {
6095       auto &NoSyncAA =
6096           A.getAAFor<AANoSync>(*this, getIRPosition(), DepClassTy::OPTIONAL);
6097       if (!NoSyncAA.isAssumedNoSync()) {
6098         LLVM_DEBUG(
6099             dbgs() << "[H2S] found an escaping use, stack is not accessible by "
6100                       "other threads and function is not nosync:\n");
6101         return false;
6102       }
6103     }
6104     if (!HasUpdatedFrees)
6105       UpdateFrees();
6106 
6107     // TODO: Allow multi exit functions that have different free calls.
6108     if (AI.PotentialFreeCalls.size() != 1) {
6109       LLVM_DEBUG(dbgs() << "[H2S] did not find one free call but "
6110                         << AI.PotentialFreeCalls.size() << "\n");
6111       return false;
6112     }
6113     CallBase *UniqueFree = *AI.PotentialFreeCalls.begin();
6114     DeallocationInfo *DI = DeallocationInfos.lookup(UniqueFree);
6115     if (!DI) {
6116       LLVM_DEBUG(
6117           dbgs() << "[H2S] unique free call was not known as deallocation call "
6118                  << *UniqueFree << "\n");
6119       return false;
6120     }
6121     if (DI->MightFreeUnknownObjects) {
6122       LLVM_DEBUG(
6123           dbgs() << "[H2S] unique free call might free unknown allocations\n");
6124       return false;
6125     }
6126     if (DI->PotentialAllocationCalls.size() > 1) {
6127       LLVM_DEBUG(dbgs() << "[H2S] unique free call might free "
6128                         << DI->PotentialAllocationCalls.size()
6129                         << " different allocations\n");
6130       return false;
6131     }
6132     if (*DI->PotentialAllocationCalls.begin() != AI.CB) {
6133       LLVM_DEBUG(
6134           dbgs()
6135           << "[H2S] unique free call not known to free this allocation but "
6136           << **DI->PotentialAllocationCalls.begin() << "\n");
6137       return false;
6138     }
6139     Instruction *CtxI = isa<InvokeInst>(AI.CB) ? AI.CB : AI.CB->getNextNode();
6140     if (!Explorer.findInContextOf(UniqueFree, CtxI)) {
6141       LLVM_DEBUG(
6142           dbgs()
6143           << "[H2S] unique free call might not be executed with the allocation "
6144           << *UniqueFree << "\n");
6145       return false;
6146     }
6147     return true;
6148   };
6149 
6150   auto UsesCheck = [&](AllocationInfo &AI) {
6151     bool ValidUsesOnly = true;
6152 
6153     auto Pred = [&](const Use &U, bool &Follow) -> bool {
6154       Instruction *UserI = cast<Instruction>(U.getUser());
6155       if (isa<LoadInst>(UserI))
6156         return true;
6157       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
6158         if (SI->getValueOperand() == U.get()) {
6159           LLVM_DEBUG(dbgs()
6160                      << "[H2S] escaping store to memory: " << *UserI << "\n");
6161           ValidUsesOnly = false;
6162         } else {
6163           // A store into the malloc'ed memory is fine.
6164         }
6165         return true;
6166       }
6167       if (auto *CB = dyn_cast<CallBase>(UserI)) {
6168         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
6169           return true;
6170         if (DeallocationInfos.count(CB)) {
6171           AI.PotentialFreeCalls.insert(CB);
6172           return true;
6173         }
6174 
6175         unsigned ArgNo = CB->getArgOperandNo(&U);
6176 
6177         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
6178             *this, IRPosition::callsite_argument(*CB, ArgNo),
6179             DepClassTy::OPTIONAL);
6180 
6181         // If a call site argument use is nofree, we are fine.
6182         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
6183             *this, IRPosition::callsite_argument(*CB, ArgNo),
6184             DepClassTy::OPTIONAL);
6185 
6186         bool MaybeCaptured = !NoCaptureAA.isAssumedNoCapture();
6187         bool MaybeFreed = !ArgNoFreeAA.isAssumedNoFree();
6188         if (MaybeCaptured ||
6189             (AI.LibraryFunctionId != LibFunc___kmpc_alloc_shared &&
6190              MaybeFreed)) {
6191           AI.HasPotentiallyFreeingUnknownUses |= MaybeFreed;
6192 
6193           // Emit a missed remark if this is missed OpenMP globalization.
6194           auto Remark = [&](OptimizationRemarkMissed ORM) {
6195             return ORM
6196                    << "Could not move globalized variable to the stack. "
6197                       "Variable is potentially captured in call. Mark "
6198                       "parameter as `__attribute__((noescape))` to override.";
6199           };
6200 
6201           if (ValidUsesOnly &&
6202               AI.LibraryFunctionId == LibFunc___kmpc_alloc_shared)
6203             A.emitRemark<OptimizationRemarkMissed>(AI.CB, "OMP113", Remark);
6204 
6205           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
6206           ValidUsesOnly = false;
6207         }
6208         return true;
6209       }
6210 
6211       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
6212           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
6213         Follow = true;
6214         return true;
6215       }
6216       // Unknown user for which we can not track uses further (in a way that
6217       // makes sense).
6218       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
6219       ValidUsesOnly = false;
6220       return true;
6221     };
6222     if (!A.checkForAllUses(Pred, *this, *AI.CB))
6223       return false;
6224     return ValidUsesOnly;
6225   };
6226 
6227   // The actual update starts here. We look at all allocations and depending on
6228   // their status perform the appropriate check(s).
6229   for (auto &It : AllocationInfos) {
6230     AllocationInfo &AI = *It.second;
6231     if (AI.Status == AllocationInfo::INVALID)
6232       continue;
6233 
6234     if (MaxHeapToStackSize == -1) {
6235       if (AI.Kind == AllocationInfo::AllocationKind::ALIGNED_ALLOC)
6236         if (!getAPInt(A, *this, *AI.CB->getArgOperand(0)).hasValue()) {
6237           LLVM_DEBUG(dbgs() << "[H2S] Unknown allocation alignment: " << *AI.CB
6238                             << "\n");
6239           AI.Status = AllocationInfo::INVALID;
6240           Changed = ChangeStatus::CHANGED;
6241           continue;
6242         }
6243     } else {
6244       Optional<APInt> Size = getSize(A, *this, AI);
6245       if (!Size.hasValue() || Size.getValue().ugt(MaxHeapToStackSize)) {
6246         LLVM_DEBUG({
6247           if (!Size.hasValue())
6248             dbgs() << "[H2S] Unknown allocation size (or alignment): " << *AI.CB
6249                    << "\n";
6250           else
6251             dbgs() << "[H2S] Allocation size too large: " << *AI.CB << " vs. "
6252                    << MaxHeapToStackSize << "\n";
6253         });
6254 
6255         AI.Status = AllocationInfo::INVALID;
6256         Changed = ChangeStatus::CHANGED;
6257         continue;
6258       }
6259     }
6260 
6261     switch (AI.Status) {
6262     case AllocationInfo::STACK_DUE_TO_USE:
6263       if (UsesCheck(AI))
6264         continue;
6265       AI.Status = AllocationInfo::STACK_DUE_TO_FREE;
6266       LLVM_FALLTHROUGH;
6267     case AllocationInfo::STACK_DUE_TO_FREE:
6268       if (FreeCheck(AI))
6269         continue;
6270       AI.Status = AllocationInfo::INVALID;
6271       Changed = ChangeStatus::CHANGED;
6272       continue;
6273     case AllocationInfo::INVALID:
6274       llvm_unreachable("Invalid allocations should never reach this point!");
6275     };
6276   }
6277 
6278   return Changed;
6279 }
6280 
6281 /// ----------------------- Privatizable Pointers ------------------------------
6282 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
6283   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
6284       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
6285 
6286   ChangeStatus indicatePessimisticFixpoint() override {
6287     AAPrivatizablePtr::indicatePessimisticFixpoint();
6288     PrivatizableType = nullptr;
6289     return ChangeStatus::CHANGED;
6290   }
6291 
6292   /// Identify the type we can chose for a private copy of the underlying
6293   /// argument. None means it is not clear yet, nullptr means there is none.
6294   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
6295 
6296   /// Return a privatizable type that encloses both T0 and T1.
6297   /// TODO: This is merely a stub for now as we should manage a mapping as well.
6298   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
6299     if (!T0.hasValue())
6300       return T1;
6301     if (!T1.hasValue())
6302       return T0;
6303     if (T0 == T1)
6304       return T0;
6305     return nullptr;
6306   }
6307 
6308   Optional<Type *> getPrivatizableType() const override {
6309     return PrivatizableType;
6310   }
6311 
6312   const std::string getAsStr() const override {
6313     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
6314   }
6315 
6316 protected:
6317   Optional<Type *> PrivatizableType;
6318 };
6319 
6320 // TODO: Do this for call site arguments (probably also other values) as well.
6321 
6322 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
6323   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
6324       : AAPrivatizablePtrImpl(IRP, A) {}
6325 
6326   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6327   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6328     // If this is a byval argument and we know all the call sites (so we can
6329     // rewrite them), there is no need to check them explicitly.
6330     bool AllCallSitesKnown;
6331     if (getIRPosition().hasAttr(Attribute::ByVal) &&
6332         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
6333                                true, AllCallSitesKnown))
6334       return getAssociatedValue().getType()->getPointerElementType();
6335 
6336     Optional<Type *> Ty;
6337     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
6338 
6339     // Make sure the associated call site argument has the same type at all call
6340     // sites and it is an allocation we know is safe to privatize, for now that
6341     // means we only allow alloca instructions.
6342     // TODO: We can additionally analyze the accesses in the callee to  create
6343     //       the type from that information instead. That is a little more
6344     //       involved and will be done in a follow up patch.
6345     auto CallSiteCheck = [&](AbstractCallSite ACS) {
6346       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
6347       // Check if a coresponding argument was found or if it is one not
6348       // associated (which can happen for callback calls).
6349       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
6350         return false;
6351 
6352       // Check that all call sites agree on a type.
6353       auto &PrivCSArgAA =
6354           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
6355       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
6356 
6357       LLVM_DEBUG({
6358         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
6359         if (CSTy.hasValue() && CSTy.getValue())
6360           CSTy.getValue()->print(dbgs());
6361         else if (CSTy.hasValue())
6362           dbgs() << "<nullptr>";
6363         else
6364           dbgs() << "<none>";
6365       });
6366 
6367       Ty = combineTypes(Ty, CSTy);
6368 
6369       LLVM_DEBUG({
6370         dbgs() << " : New Type: ";
6371         if (Ty.hasValue() && Ty.getValue())
6372           Ty.getValue()->print(dbgs());
6373         else if (Ty.hasValue())
6374           dbgs() << "<nullptr>";
6375         else
6376           dbgs() << "<none>";
6377         dbgs() << "\n";
6378       });
6379 
6380       return !Ty.hasValue() || Ty.getValue();
6381     };
6382 
6383     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
6384       return nullptr;
6385     return Ty;
6386   }
6387 
6388   /// See AbstractAttribute::updateImpl(...).
6389   ChangeStatus updateImpl(Attributor &A) override {
6390     PrivatizableType = identifyPrivatizableType(A);
6391     if (!PrivatizableType.hasValue())
6392       return ChangeStatus::UNCHANGED;
6393     if (!PrivatizableType.getValue())
6394       return indicatePessimisticFixpoint();
6395 
6396     // The dependence is optional so we don't give up once we give up on the
6397     // alignment.
6398     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
6399                         DepClassTy::OPTIONAL);
6400 
6401     // Avoid arguments with padding for now.
6402     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
6403         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
6404                                                 A.getInfoCache().getDL())) {
6405       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
6406       return indicatePessimisticFixpoint();
6407     }
6408 
6409     // Verify callee and caller agree on how the promoted argument would be
6410     // passed.
6411     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
6412     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
6413     // which doesn't require the arguments ArgumentPromotion wanted to pass.
6414     Function &Fn = *getIRPosition().getAnchorScope();
6415     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
6416     ArgsToPromote.insert(getAssociatedArgument());
6417     const auto *TTI =
6418         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
6419     if (!TTI ||
6420         !ArgumentPromotionPass::areFunctionArgsABICompatible(
6421             Fn, *TTI, ArgsToPromote, Dummy) ||
6422         ArgsToPromote.empty()) {
6423       LLVM_DEBUG(
6424           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
6425                  << Fn.getName() << "\n");
6426       return indicatePessimisticFixpoint();
6427     }
6428 
6429     // Collect the types that will replace the privatizable type in the function
6430     // signature.
6431     SmallVector<Type *, 16> ReplacementTypes;
6432     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6433 
6434     // Register a rewrite of the argument.
6435     Argument *Arg = getAssociatedArgument();
6436     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
6437       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
6438       return indicatePessimisticFixpoint();
6439     }
6440 
6441     unsigned ArgNo = Arg->getArgNo();
6442 
6443     // Helper to check if for the given call site the associated argument is
6444     // passed to a callback where the privatization would be different.
6445     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
6446       SmallVector<const Use *, 4> CallbackUses;
6447       AbstractCallSite::getCallbackUses(CB, CallbackUses);
6448       for (const Use *U : CallbackUses) {
6449         AbstractCallSite CBACS(U);
6450         assert(CBACS && CBACS.isCallbackCall());
6451         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
6452           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
6453 
6454           LLVM_DEBUG({
6455             dbgs()
6456                 << "[AAPrivatizablePtr] Argument " << *Arg
6457                 << "check if can be privatized in the context of its parent ("
6458                 << Arg->getParent()->getName()
6459                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
6460                    "callback ("
6461                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6462                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
6463                 << CBACS.getCallArgOperand(CBArg) << " vs "
6464                 << CB.getArgOperand(ArgNo) << "\n"
6465                 << "[AAPrivatizablePtr] " << CBArg << " : "
6466                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
6467           });
6468 
6469           if (CBArgNo != int(ArgNo))
6470             continue;
6471           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6472               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
6473           if (CBArgPrivAA.isValidState()) {
6474             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
6475             if (!CBArgPrivTy.hasValue())
6476               continue;
6477             if (CBArgPrivTy.getValue() == PrivatizableType)
6478               continue;
6479           }
6480 
6481           LLVM_DEBUG({
6482             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6483                    << " cannot be privatized in the context of its parent ("
6484                    << Arg->getParent()->getName()
6485                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
6486                       "callback ("
6487                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
6488                    << ").\n[AAPrivatizablePtr] for which the argument "
6489                       "privatization is not compatible.\n";
6490           });
6491           return false;
6492         }
6493       }
6494       return true;
6495     };
6496 
6497     // Helper to check if for the given call site the associated argument is
6498     // passed to a direct call where the privatization would be different.
6499     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
6500       CallBase *DC = cast<CallBase>(ACS.getInstruction());
6501       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
6502       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->arg_size() &&
6503              "Expected a direct call operand for callback call operand");
6504 
6505       LLVM_DEBUG({
6506         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6507                << " check if be privatized in the context of its parent ("
6508                << Arg->getParent()->getName()
6509                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6510                   "direct call of ("
6511                << DCArgNo << "@" << DC->getCalledFunction()->getName()
6512                << ").\n";
6513       });
6514 
6515       Function *DCCallee = DC->getCalledFunction();
6516       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
6517         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
6518             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
6519             DepClassTy::REQUIRED);
6520         if (DCArgPrivAA.isValidState()) {
6521           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
6522           if (!DCArgPrivTy.hasValue())
6523             return true;
6524           if (DCArgPrivTy.getValue() == PrivatizableType)
6525             return true;
6526         }
6527       }
6528 
6529       LLVM_DEBUG({
6530         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
6531                << " cannot be privatized in the context of its parent ("
6532                << Arg->getParent()->getName()
6533                << ")\n[AAPrivatizablePtr] because it is an argument in a "
6534                   "direct call of ("
6535                << ACS.getInstruction()->getCalledFunction()->getName()
6536                << ").\n[AAPrivatizablePtr] for which the argument "
6537                   "privatization is not compatible.\n";
6538       });
6539       return false;
6540     };
6541 
6542     // Helper to check if the associated argument is used at the given abstract
6543     // call site in a way that is incompatible with the privatization assumed
6544     // here.
6545     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
6546       if (ACS.isDirectCall())
6547         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
6548       if (ACS.isCallbackCall())
6549         return IsCompatiblePrivArgOfDirectCS(ACS);
6550       return false;
6551     };
6552 
6553     bool AllCallSitesKnown;
6554     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
6555                                 AllCallSitesKnown))
6556       return indicatePessimisticFixpoint();
6557 
6558     return ChangeStatus::UNCHANGED;
6559   }
6560 
6561   /// Given a type to private \p PrivType, collect the constituates (which are
6562   /// used) in \p ReplacementTypes.
6563   static void
6564   identifyReplacementTypes(Type *PrivType,
6565                            SmallVectorImpl<Type *> &ReplacementTypes) {
6566     // TODO: For now we expand the privatization type to the fullest which can
6567     //       lead to dead arguments that need to be removed later.
6568     assert(PrivType && "Expected privatizable type!");
6569 
6570     // Traverse the type, extract constituate types on the outermost level.
6571     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6572       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
6573         ReplacementTypes.push_back(PrivStructType->getElementType(u));
6574     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6575       ReplacementTypes.append(PrivArrayType->getNumElements(),
6576                               PrivArrayType->getElementType());
6577     } else {
6578       ReplacementTypes.push_back(PrivType);
6579     }
6580   }
6581 
6582   /// Initialize \p Base according to the type \p PrivType at position \p IP.
6583   /// The values needed are taken from the arguments of \p F starting at
6584   /// position \p ArgNo.
6585   static void createInitialization(Type *PrivType, Value &Base, Function &F,
6586                                    unsigned ArgNo, Instruction &IP) {
6587     assert(PrivType && "Expected privatizable type!");
6588 
6589     IRBuilder<NoFolder> IRB(&IP);
6590     const DataLayout &DL = F.getParent()->getDataLayout();
6591 
6592     // Traverse the type, build GEPs and stores.
6593     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6594       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6595       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6596         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
6597         Value *Ptr =
6598             constructPointer(PointeeTy, PrivType, &Base,
6599                              PrivStructLayout->getElementOffset(u), IRB, DL);
6600         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6601       }
6602     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6603       Type *PointeeTy = PrivArrayType->getElementType();
6604       Type *PointeePtrTy = PointeeTy->getPointerTo();
6605       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6606       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6607         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
6608                                       u * PointeeTySize, IRB, DL);
6609         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
6610       }
6611     } else {
6612       new StoreInst(F.getArg(ArgNo), &Base, &IP);
6613     }
6614   }
6615 
6616   /// Extract values from \p Base according to the type \p PrivType at the
6617   /// call position \p ACS. The values are appended to \p ReplacementValues.
6618   void createReplacementValues(Align Alignment, Type *PrivType,
6619                                AbstractCallSite ACS, Value *Base,
6620                                SmallVectorImpl<Value *> &ReplacementValues) {
6621     assert(Base && "Expected base value!");
6622     assert(PrivType && "Expected privatizable type!");
6623     Instruction *IP = ACS.getInstruction();
6624 
6625     IRBuilder<NoFolder> IRB(IP);
6626     const DataLayout &DL = IP->getModule()->getDataLayout();
6627 
6628     if (Base->getType()->getPointerElementType() != PrivType)
6629       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
6630                                                  "", ACS.getInstruction());
6631 
6632     // Traverse the type, build GEPs and loads.
6633     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
6634       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
6635       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
6636         Type *PointeeTy = PrivStructType->getElementType(u);
6637         Value *Ptr =
6638             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
6639                              PrivStructLayout->getElementOffset(u), IRB, DL);
6640         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6641         L->setAlignment(Alignment);
6642         ReplacementValues.push_back(L);
6643       }
6644     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
6645       Type *PointeeTy = PrivArrayType->getElementType();
6646       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
6647       Type *PointeePtrTy = PointeeTy->getPointerTo();
6648       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
6649         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
6650                                       u * PointeeTySize, IRB, DL);
6651         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
6652         L->setAlignment(Alignment);
6653         ReplacementValues.push_back(L);
6654       }
6655     } else {
6656       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
6657       L->setAlignment(Alignment);
6658       ReplacementValues.push_back(L);
6659     }
6660   }
6661 
6662   /// See AbstractAttribute::manifest(...)
6663   ChangeStatus manifest(Attributor &A) override {
6664     if (!PrivatizableType.hasValue())
6665       return ChangeStatus::UNCHANGED;
6666     assert(PrivatizableType.getValue() && "Expected privatizable type!");
6667 
6668     // Collect all tail calls in the function as we cannot allow new allocas to
6669     // escape into tail recursion.
6670     // TODO: Be smarter about new allocas escaping into tail calls.
6671     SmallVector<CallInst *, 16> TailCalls;
6672     bool UsedAssumedInformation = false;
6673     if (!A.checkForAllInstructions(
6674             [&](Instruction &I) {
6675               CallInst &CI = cast<CallInst>(I);
6676               if (CI.isTailCall())
6677                 TailCalls.push_back(&CI);
6678               return true;
6679             },
6680             *this, {Instruction::Call}, UsedAssumedInformation))
6681       return ChangeStatus::UNCHANGED;
6682 
6683     Argument *Arg = getAssociatedArgument();
6684     // Query AAAlign attribute for alignment of associated argument to
6685     // determine the best alignment of loads.
6686     const auto &AlignAA =
6687         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
6688 
6689     // Callback to repair the associated function. A new alloca is placed at the
6690     // beginning and initialized with the values passed through arguments. The
6691     // new alloca replaces the use of the old pointer argument.
6692     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
6693         [=](const Attributor::ArgumentReplacementInfo &ARI,
6694             Function &ReplacementFn, Function::arg_iterator ArgIt) {
6695           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
6696           Instruction *IP = &*EntryBB.getFirstInsertionPt();
6697           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
6698                                            Arg->getName() + ".priv", IP);
6699           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
6700                                ArgIt->getArgNo(), *IP);
6701 
6702           if (AI->getType() != Arg->getType())
6703             AI =
6704                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
6705           Arg->replaceAllUsesWith(AI);
6706 
6707           for (CallInst *CI : TailCalls)
6708             CI->setTailCall(false);
6709         };
6710 
6711     // Callback to repair a call site of the associated function. The elements
6712     // of the privatizable type are loaded prior to the call and passed to the
6713     // new function version.
6714     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
6715         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
6716                       AbstractCallSite ACS,
6717                       SmallVectorImpl<Value *> &NewArgOperands) {
6718           // When no alignment is specified for the load instruction,
6719           // natural alignment is assumed.
6720           createReplacementValues(
6721               assumeAligned(AlignAA.getAssumedAlign()),
6722               PrivatizableType.getValue(), ACS,
6723               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
6724               NewArgOperands);
6725         };
6726 
6727     // Collect the types that will replace the privatizable type in the function
6728     // signature.
6729     SmallVector<Type *, 16> ReplacementTypes;
6730     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
6731 
6732     // Register a rewrite of the argument.
6733     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
6734                                            std::move(FnRepairCB),
6735                                            std::move(ACSRepairCB)))
6736       return ChangeStatus::CHANGED;
6737     return ChangeStatus::UNCHANGED;
6738   }
6739 
6740   /// See AbstractAttribute::trackStatistics()
6741   void trackStatistics() const override {
6742     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
6743   }
6744 };
6745 
6746 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
6747   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
6748       : AAPrivatizablePtrImpl(IRP, A) {}
6749 
6750   /// See AbstractAttribute::initialize(...).
6751   virtual void initialize(Attributor &A) override {
6752     // TODO: We can privatize more than arguments.
6753     indicatePessimisticFixpoint();
6754   }
6755 
6756   ChangeStatus updateImpl(Attributor &A) override {
6757     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
6758                      "updateImpl will not be called");
6759   }
6760 
6761   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
6762   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
6763     Value *Obj = getUnderlyingObject(&getAssociatedValue());
6764     if (!Obj) {
6765       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
6766       return nullptr;
6767     }
6768 
6769     if (auto *AI = dyn_cast<AllocaInst>(Obj))
6770       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
6771         if (CI->isOne())
6772           return Obj->getType()->getPointerElementType();
6773     if (auto *Arg = dyn_cast<Argument>(Obj)) {
6774       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
6775           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
6776       if (PrivArgAA.isAssumedPrivatizablePtr())
6777         return Obj->getType()->getPointerElementType();
6778     }
6779 
6780     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
6781                          "alloca nor privatizable argument: "
6782                       << *Obj << "!\n");
6783     return nullptr;
6784   }
6785 
6786   /// See AbstractAttribute::trackStatistics()
6787   void trackStatistics() const override {
6788     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
6789   }
6790 };
6791 
6792 struct AAPrivatizablePtrCallSiteArgument final
6793     : public AAPrivatizablePtrFloating {
6794   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
6795       : AAPrivatizablePtrFloating(IRP, A) {}
6796 
6797   /// See AbstractAttribute::initialize(...).
6798   void initialize(Attributor &A) override {
6799     if (getIRPosition().hasAttr(Attribute::ByVal))
6800       indicateOptimisticFixpoint();
6801   }
6802 
6803   /// See AbstractAttribute::updateImpl(...).
6804   ChangeStatus updateImpl(Attributor &A) override {
6805     PrivatizableType = identifyPrivatizableType(A);
6806     if (!PrivatizableType.hasValue())
6807       return ChangeStatus::UNCHANGED;
6808     if (!PrivatizableType.getValue())
6809       return indicatePessimisticFixpoint();
6810 
6811     const IRPosition &IRP = getIRPosition();
6812     auto &NoCaptureAA =
6813         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
6814     if (!NoCaptureAA.isAssumedNoCapture()) {
6815       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
6816       return indicatePessimisticFixpoint();
6817     }
6818 
6819     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
6820     if (!NoAliasAA.isAssumedNoAlias()) {
6821       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
6822       return indicatePessimisticFixpoint();
6823     }
6824 
6825     const auto &MemBehaviorAA =
6826         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
6827     if (!MemBehaviorAA.isAssumedReadOnly()) {
6828       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
6829       return indicatePessimisticFixpoint();
6830     }
6831 
6832     return ChangeStatus::UNCHANGED;
6833   }
6834 
6835   /// See AbstractAttribute::trackStatistics()
6836   void trackStatistics() const override {
6837     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
6838   }
6839 };
6840 
6841 struct AAPrivatizablePtrCallSiteReturned final
6842     : public AAPrivatizablePtrFloating {
6843   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
6844       : AAPrivatizablePtrFloating(IRP, A) {}
6845 
6846   /// See AbstractAttribute::initialize(...).
6847   void initialize(Attributor &A) override {
6848     // TODO: We can privatize more than arguments.
6849     indicatePessimisticFixpoint();
6850   }
6851 
6852   /// See AbstractAttribute::trackStatistics()
6853   void trackStatistics() const override {
6854     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
6855   }
6856 };
6857 
6858 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
6859   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
6860       : AAPrivatizablePtrFloating(IRP, A) {}
6861 
6862   /// See AbstractAttribute::initialize(...).
6863   void initialize(Attributor &A) override {
6864     // TODO: We can privatize more than arguments.
6865     indicatePessimisticFixpoint();
6866   }
6867 
6868   /// See AbstractAttribute::trackStatistics()
6869   void trackStatistics() const override {
6870     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
6871   }
6872 };
6873 
6874 /// -------------------- Memory Behavior Attributes ----------------------------
6875 /// Includes read-none, read-only, and write-only.
6876 /// ----------------------------------------------------------------------------
6877 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
6878   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
6879       : AAMemoryBehavior(IRP, A) {}
6880 
6881   /// See AbstractAttribute::initialize(...).
6882   void initialize(Attributor &A) override {
6883     intersectAssumedBits(BEST_STATE);
6884     getKnownStateFromValue(getIRPosition(), getState());
6885     AAMemoryBehavior::initialize(A);
6886   }
6887 
6888   /// Return the memory behavior information encoded in the IR for \p IRP.
6889   static void getKnownStateFromValue(const IRPosition &IRP,
6890                                      BitIntegerState &State,
6891                                      bool IgnoreSubsumingPositions = false) {
6892     SmallVector<Attribute, 2> Attrs;
6893     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6894     for (const Attribute &Attr : Attrs) {
6895       switch (Attr.getKindAsEnum()) {
6896       case Attribute::ReadNone:
6897         State.addKnownBits(NO_ACCESSES);
6898         break;
6899       case Attribute::ReadOnly:
6900         State.addKnownBits(NO_WRITES);
6901         break;
6902       case Attribute::WriteOnly:
6903         State.addKnownBits(NO_READS);
6904         break;
6905       default:
6906         llvm_unreachable("Unexpected attribute!");
6907       }
6908     }
6909 
6910     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
6911       if (!I->mayReadFromMemory())
6912         State.addKnownBits(NO_READS);
6913       if (!I->mayWriteToMemory())
6914         State.addKnownBits(NO_WRITES);
6915     }
6916   }
6917 
6918   /// See AbstractAttribute::getDeducedAttributes(...).
6919   void getDeducedAttributes(LLVMContext &Ctx,
6920                             SmallVectorImpl<Attribute> &Attrs) const override {
6921     assert(Attrs.size() == 0);
6922     if (isAssumedReadNone())
6923       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6924     else if (isAssumedReadOnly())
6925       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
6926     else if (isAssumedWriteOnly())
6927       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
6928     assert(Attrs.size() <= 1);
6929   }
6930 
6931   /// See AbstractAttribute::manifest(...).
6932   ChangeStatus manifest(Attributor &A) override {
6933     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
6934       return ChangeStatus::UNCHANGED;
6935 
6936     const IRPosition &IRP = getIRPosition();
6937 
6938     // Check if we would improve the existing attributes first.
6939     SmallVector<Attribute, 4> DeducedAttrs;
6940     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6941     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6942           return IRP.hasAttr(Attr.getKindAsEnum(),
6943                              /* IgnoreSubsumingPositions */ true);
6944         }))
6945       return ChangeStatus::UNCHANGED;
6946 
6947     // Clear existing attributes.
6948     IRP.removeAttrs(AttrKinds);
6949 
6950     // Use the generic manifest method.
6951     return IRAttribute::manifest(A);
6952   }
6953 
6954   /// See AbstractState::getAsStr().
6955   const std::string getAsStr() const override {
6956     if (isAssumedReadNone())
6957       return "readnone";
6958     if (isAssumedReadOnly())
6959       return "readonly";
6960     if (isAssumedWriteOnly())
6961       return "writeonly";
6962     return "may-read/write";
6963   }
6964 
6965   /// The set of IR attributes AAMemoryBehavior deals with.
6966   static const Attribute::AttrKind AttrKinds[3];
6967 };
6968 
6969 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
6970     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
6971 
6972 /// Memory behavior attribute for a floating value.
6973 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
6974   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
6975       : AAMemoryBehaviorImpl(IRP, A) {}
6976 
6977   /// See AbstractAttribute::updateImpl(...).
6978   ChangeStatus updateImpl(Attributor &A) override;
6979 
6980   /// See AbstractAttribute::trackStatistics()
6981   void trackStatistics() const override {
6982     if (isAssumedReadNone())
6983       STATS_DECLTRACK_FLOATING_ATTR(readnone)
6984     else if (isAssumedReadOnly())
6985       STATS_DECLTRACK_FLOATING_ATTR(readonly)
6986     else if (isAssumedWriteOnly())
6987       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6988   }
6989 
6990 private:
6991   /// Return true if users of \p UserI might access the underlying
6992   /// variable/location described by \p U and should therefore be analyzed.
6993   bool followUsersOfUseIn(Attributor &A, const Use &U,
6994                           const Instruction *UserI);
6995 
6996   /// Update the state according to the effect of use \p U in \p UserI.
6997   void analyzeUseIn(Attributor &A, const Use &U, const Instruction *UserI);
6998 };
6999 
7000 /// Memory behavior attribute for function argument.
7001 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
7002   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
7003       : AAMemoryBehaviorFloating(IRP, A) {}
7004 
7005   /// See AbstractAttribute::initialize(...).
7006   void initialize(Attributor &A) override {
7007     intersectAssumedBits(BEST_STATE);
7008     const IRPosition &IRP = getIRPosition();
7009     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
7010     // can query it when we use has/getAttr. That would allow us to reuse the
7011     // initialize of the base class here.
7012     bool HasByVal =
7013         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
7014     getKnownStateFromValue(IRP, getState(),
7015                            /* IgnoreSubsumingPositions */ HasByVal);
7016 
7017     // Initialize the use vector with all direct uses of the associated value.
7018     Argument *Arg = getAssociatedArgument();
7019     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent())))
7020       indicatePessimisticFixpoint();
7021   }
7022 
7023   ChangeStatus manifest(Attributor &A) override {
7024     // TODO: Pointer arguments are not supported on vectors of pointers yet.
7025     if (!getAssociatedValue().getType()->isPointerTy())
7026       return ChangeStatus::UNCHANGED;
7027 
7028     // TODO: From readattrs.ll: "inalloca parameters are always
7029     //                           considered written"
7030     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
7031       removeKnownBits(NO_WRITES);
7032       removeAssumedBits(NO_WRITES);
7033     }
7034     return AAMemoryBehaviorFloating::manifest(A);
7035   }
7036 
7037   /// See AbstractAttribute::trackStatistics()
7038   void trackStatistics() const override {
7039     if (isAssumedReadNone())
7040       STATS_DECLTRACK_ARG_ATTR(readnone)
7041     else if (isAssumedReadOnly())
7042       STATS_DECLTRACK_ARG_ATTR(readonly)
7043     else if (isAssumedWriteOnly())
7044       STATS_DECLTRACK_ARG_ATTR(writeonly)
7045   }
7046 };
7047 
7048 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
7049   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
7050       : AAMemoryBehaviorArgument(IRP, A) {}
7051 
7052   /// See AbstractAttribute::initialize(...).
7053   void initialize(Attributor &A) override {
7054     // If we don't have an associated attribute this is either a variadic call
7055     // or an indirect call, either way, nothing to do here.
7056     Argument *Arg = getAssociatedArgument();
7057     if (!Arg) {
7058       indicatePessimisticFixpoint();
7059       return;
7060     }
7061     if (Arg->hasByValAttr()) {
7062       addKnownBits(NO_WRITES);
7063       removeKnownBits(NO_READS);
7064       removeAssumedBits(NO_READS);
7065     }
7066     AAMemoryBehaviorArgument::initialize(A);
7067     if (getAssociatedFunction()->isDeclaration())
7068       indicatePessimisticFixpoint();
7069   }
7070 
7071   /// See AbstractAttribute::updateImpl(...).
7072   ChangeStatus updateImpl(Attributor &A) override {
7073     // TODO: Once we have call site specific value information we can provide
7074     //       call site specific liveness liveness information and then it makes
7075     //       sense to specialize attributes for call sites arguments instead of
7076     //       redirecting requests to the callee argument.
7077     Argument *Arg = getAssociatedArgument();
7078     const IRPosition &ArgPos = IRPosition::argument(*Arg);
7079     auto &ArgAA =
7080         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
7081     return clampStateAndIndicateChange(getState(), ArgAA.getState());
7082   }
7083 
7084   /// See AbstractAttribute::trackStatistics()
7085   void trackStatistics() const override {
7086     if (isAssumedReadNone())
7087       STATS_DECLTRACK_CSARG_ATTR(readnone)
7088     else if (isAssumedReadOnly())
7089       STATS_DECLTRACK_CSARG_ATTR(readonly)
7090     else if (isAssumedWriteOnly())
7091       STATS_DECLTRACK_CSARG_ATTR(writeonly)
7092   }
7093 };
7094 
7095 /// Memory behavior attribute for a call site return position.
7096 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
7097   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
7098       : AAMemoryBehaviorFloating(IRP, A) {}
7099 
7100   /// See AbstractAttribute::initialize(...).
7101   void initialize(Attributor &A) override {
7102     AAMemoryBehaviorImpl::initialize(A);
7103     Function *F = getAssociatedFunction();
7104     if (!F || F->isDeclaration())
7105       indicatePessimisticFixpoint();
7106   }
7107 
7108   /// See AbstractAttribute::manifest(...).
7109   ChangeStatus manifest(Attributor &A) override {
7110     // We do not annotate returned values.
7111     return ChangeStatus::UNCHANGED;
7112   }
7113 
7114   /// See AbstractAttribute::trackStatistics()
7115   void trackStatistics() const override {}
7116 };
7117 
7118 /// An AA to represent the memory behavior function attributes.
7119 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
7120   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
7121       : AAMemoryBehaviorImpl(IRP, A) {}
7122 
7123   /// See AbstractAttribute::updateImpl(Attributor &A).
7124   virtual ChangeStatus updateImpl(Attributor &A) override;
7125 
7126   /// See AbstractAttribute::manifest(...).
7127   ChangeStatus manifest(Attributor &A) override {
7128     Function &F = cast<Function>(getAnchorValue());
7129     if (isAssumedReadNone()) {
7130       F.removeFnAttr(Attribute::ArgMemOnly);
7131       F.removeFnAttr(Attribute::InaccessibleMemOnly);
7132       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
7133     }
7134     return AAMemoryBehaviorImpl::manifest(A);
7135   }
7136 
7137   /// See AbstractAttribute::trackStatistics()
7138   void trackStatistics() const override {
7139     if (isAssumedReadNone())
7140       STATS_DECLTRACK_FN_ATTR(readnone)
7141     else if (isAssumedReadOnly())
7142       STATS_DECLTRACK_FN_ATTR(readonly)
7143     else if (isAssumedWriteOnly())
7144       STATS_DECLTRACK_FN_ATTR(writeonly)
7145   }
7146 };
7147 
7148 /// AAMemoryBehavior attribute for call sites.
7149 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
7150   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
7151       : AAMemoryBehaviorImpl(IRP, A) {}
7152 
7153   /// See AbstractAttribute::initialize(...).
7154   void initialize(Attributor &A) override {
7155     AAMemoryBehaviorImpl::initialize(A);
7156     Function *F = getAssociatedFunction();
7157     if (!F || F->isDeclaration())
7158       indicatePessimisticFixpoint();
7159   }
7160 
7161   /// See AbstractAttribute::updateImpl(...).
7162   ChangeStatus updateImpl(Attributor &A) override {
7163     // TODO: Once we have call site specific value information we can provide
7164     //       call site specific liveness liveness information and then it makes
7165     //       sense to specialize attributes for call sites arguments instead of
7166     //       redirecting requests to the callee argument.
7167     Function *F = getAssociatedFunction();
7168     const IRPosition &FnPos = IRPosition::function(*F);
7169     auto &FnAA =
7170         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
7171     return clampStateAndIndicateChange(getState(), FnAA.getState());
7172   }
7173 
7174   /// See AbstractAttribute::trackStatistics()
7175   void trackStatistics() const override {
7176     if (isAssumedReadNone())
7177       STATS_DECLTRACK_CS_ATTR(readnone)
7178     else if (isAssumedReadOnly())
7179       STATS_DECLTRACK_CS_ATTR(readonly)
7180     else if (isAssumedWriteOnly())
7181       STATS_DECLTRACK_CS_ATTR(writeonly)
7182   }
7183 };
7184 
7185 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
7186 
7187   // The current assumed state used to determine a change.
7188   auto AssumedState = getAssumed();
7189 
7190   auto CheckRWInst = [&](Instruction &I) {
7191     // If the instruction has an own memory behavior state, use it to restrict
7192     // the local state. No further analysis is required as the other memory
7193     // state is as optimistic as it gets.
7194     if (const auto *CB = dyn_cast<CallBase>(&I)) {
7195       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
7196           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
7197       intersectAssumedBits(MemBehaviorAA.getAssumed());
7198       return !isAtFixpoint();
7199     }
7200 
7201     // Remove access kind modifiers if necessary.
7202     if (I.mayReadFromMemory())
7203       removeAssumedBits(NO_READS);
7204     if (I.mayWriteToMemory())
7205       removeAssumedBits(NO_WRITES);
7206     return !isAtFixpoint();
7207   };
7208 
7209   bool UsedAssumedInformation = false;
7210   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7211                                           UsedAssumedInformation))
7212     return indicatePessimisticFixpoint();
7213 
7214   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7215                                         : ChangeStatus::UNCHANGED;
7216 }
7217 
7218 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
7219 
7220   const IRPosition &IRP = getIRPosition();
7221   const IRPosition &FnPos = IRPosition::function_scope(IRP);
7222   AAMemoryBehavior::StateType &S = getState();
7223 
7224   // First, check the function scope. We take the known information and we avoid
7225   // work if the assumed information implies the current assumed information for
7226   // this attribute. This is a valid for all but byval arguments.
7227   Argument *Arg = IRP.getAssociatedArgument();
7228   AAMemoryBehavior::base_t FnMemAssumedState =
7229       AAMemoryBehavior::StateType::getWorstState();
7230   if (!Arg || !Arg->hasByValAttr()) {
7231     const auto &FnMemAA =
7232         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
7233     FnMemAssumedState = FnMemAA.getAssumed();
7234     S.addKnownBits(FnMemAA.getKnown());
7235     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
7236       return ChangeStatus::UNCHANGED;
7237   }
7238 
7239   // The current assumed state used to determine a change.
7240   auto AssumedState = S.getAssumed();
7241 
7242   // Make sure the value is not captured (except through "return"), if
7243   // it is, any information derived would be irrelevant anyway as we cannot
7244   // check the potential aliases introduced by the capture. However, no need
7245   // to fall back to anythign less optimistic than the function state.
7246   const auto &ArgNoCaptureAA =
7247       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
7248   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
7249     S.intersectAssumedBits(FnMemAssumedState);
7250     return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7251                                           : ChangeStatus::UNCHANGED;
7252   }
7253 
7254   // Visit and expand uses until all are analyzed or a fixpoint is reached.
7255   auto UsePred = [&](const Use &U, bool &Follow) -> bool {
7256     Instruction *UserI = cast<Instruction>(U.getUser());
7257     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << *U << " in " << *UserI
7258                       << " \n");
7259 
7260     // Droppable users, e.g., llvm::assume does not actually perform any action.
7261     if (UserI->isDroppable())
7262       return true;
7263 
7264     // Check if the users of UserI should also be visited.
7265     Follow = followUsersOfUseIn(A, U, UserI);
7266 
7267     // If UserI might touch memory we analyze the use in detail.
7268     if (UserI->mayReadOrWriteMemory())
7269       analyzeUseIn(A, U, UserI);
7270 
7271     return !isAtFixpoint();
7272   };
7273 
7274   if (!A.checkForAllUses(UsePred, *this, getAssociatedValue()))
7275     return indicatePessimisticFixpoint();
7276 
7277   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
7278                                         : ChangeStatus::UNCHANGED;
7279 }
7280 
7281 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use &U,
7282                                                   const Instruction *UserI) {
7283   // The loaded value is unrelated to the pointer argument, no need to
7284   // follow the users of the load.
7285   if (isa<LoadInst>(UserI))
7286     return false;
7287 
7288   // By default we follow all uses assuming UserI might leak information on U,
7289   // we have special handling for call sites operands though.
7290   const auto *CB = dyn_cast<CallBase>(UserI);
7291   if (!CB || !CB->isArgOperand(&U))
7292     return true;
7293 
7294   // If the use is a call argument known not to be captured, the users of
7295   // the call do not need to be visited because they have to be unrelated to
7296   // the input. Note that this check is not trivial even though we disallow
7297   // general capturing of the underlying argument. The reason is that the
7298   // call might the argument "through return", which we allow and for which we
7299   // need to check call users.
7300   if (U.get()->getType()->isPointerTy()) {
7301     unsigned ArgNo = CB->getArgOperandNo(&U);
7302     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
7303         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
7304     return !ArgNoCaptureAA.isAssumedNoCapture();
7305   }
7306 
7307   return true;
7308 }
7309 
7310 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use &U,
7311                                             const Instruction *UserI) {
7312   assert(UserI->mayReadOrWriteMemory());
7313 
7314   switch (UserI->getOpcode()) {
7315   default:
7316     // TODO: Handle all atomics and other side-effect operations we know of.
7317     break;
7318   case Instruction::Load:
7319     // Loads cause the NO_READS property to disappear.
7320     removeAssumedBits(NO_READS);
7321     return;
7322 
7323   case Instruction::Store:
7324     // Stores cause the NO_WRITES property to disappear if the use is the
7325     // pointer operand. Note that while capturing was taken care of somewhere
7326     // else we need to deal with stores of the value that is not looked through.
7327     if (cast<StoreInst>(UserI)->getPointerOperand() == U.get())
7328       removeAssumedBits(NO_WRITES);
7329     else
7330       indicatePessimisticFixpoint();
7331     return;
7332 
7333   case Instruction::Call:
7334   case Instruction::CallBr:
7335   case Instruction::Invoke: {
7336     // For call sites we look at the argument memory behavior attribute (this
7337     // could be recursive!) in order to restrict our own state.
7338     const auto *CB = cast<CallBase>(UserI);
7339 
7340     // Give up on operand bundles.
7341     if (CB->isBundleOperand(&U)) {
7342       indicatePessimisticFixpoint();
7343       return;
7344     }
7345 
7346     // Calling a function does read the function pointer, maybe write it if the
7347     // function is self-modifying.
7348     if (CB->isCallee(&U)) {
7349       removeAssumedBits(NO_READS);
7350       break;
7351     }
7352 
7353     // Adjust the possible access behavior based on the information on the
7354     // argument.
7355     IRPosition Pos;
7356     if (U.get()->getType()->isPointerTy())
7357       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
7358     else
7359       Pos = IRPosition::callsite_function(*CB);
7360     const auto &MemBehaviorAA =
7361         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
7362     // "assumed" has at most the same bits as the MemBehaviorAA assumed
7363     // and at least "known".
7364     intersectAssumedBits(MemBehaviorAA.getAssumed());
7365     return;
7366   }
7367   };
7368 
7369   // Generally, look at the "may-properties" and adjust the assumed state if we
7370   // did not trigger special handling before.
7371   if (UserI->mayReadFromMemory())
7372     removeAssumedBits(NO_READS);
7373   if (UserI->mayWriteToMemory())
7374     removeAssumedBits(NO_WRITES);
7375 }
7376 
7377 /// -------------------- Memory Locations Attributes ---------------------------
7378 /// Includes read-none, argmemonly, inaccessiblememonly,
7379 /// inaccessiblememorargmemonly
7380 /// ----------------------------------------------------------------------------
7381 
7382 std::string AAMemoryLocation::getMemoryLocationsAsStr(
7383     AAMemoryLocation::MemoryLocationsKind MLK) {
7384   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
7385     return "all memory";
7386   if (MLK == AAMemoryLocation::NO_LOCATIONS)
7387     return "no memory";
7388   std::string S = "memory:";
7389   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
7390     S += "stack,";
7391   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
7392     S += "constant,";
7393   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
7394     S += "internal global,";
7395   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
7396     S += "external global,";
7397   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
7398     S += "argument,";
7399   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
7400     S += "inaccessible,";
7401   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
7402     S += "malloced,";
7403   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
7404     S += "unknown,";
7405   S.pop_back();
7406   return S;
7407 }
7408 
7409 namespace {
7410 struct AAMemoryLocationImpl : public AAMemoryLocation {
7411 
7412   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
7413       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
7414     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7415       AccessKind2Accesses[u] = nullptr;
7416   }
7417 
7418   ~AAMemoryLocationImpl() {
7419     // The AccessSets are allocated via a BumpPtrAllocator, we call
7420     // the destructor manually.
7421     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
7422       if (AccessKind2Accesses[u])
7423         AccessKind2Accesses[u]->~AccessSet();
7424   }
7425 
7426   /// See AbstractAttribute::initialize(...).
7427   void initialize(Attributor &A) override {
7428     intersectAssumedBits(BEST_STATE);
7429     getKnownStateFromValue(A, getIRPosition(), getState());
7430     AAMemoryLocation::initialize(A);
7431   }
7432 
7433   /// Return the memory behavior information encoded in the IR for \p IRP.
7434   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
7435                                      BitIntegerState &State,
7436                                      bool IgnoreSubsumingPositions = false) {
7437     // For internal functions we ignore `argmemonly` and
7438     // `inaccessiblememorargmemonly` as we might break it via interprocedural
7439     // constant propagation. It is unclear if this is the best way but it is
7440     // unlikely this will cause real performance problems. If we are deriving
7441     // attributes for the anchor function we even remove the attribute in
7442     // addition to ignoring it.
7443     bool UseArgMemOnly = true;
7444     Function *AnchorFn = IRP.getAnchorScope();
7445     if (AnchorFn && A.isRunOn(*AnchorFn))
7446       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
7447 
7448     SmallVector<Attribute, 2> Attrs;
7449     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
7450     for (const Attribute &Attr : Attrs) {
7451       switch (Attr.getKindAsEnum()) {
7452       case Attribute::ReadNone:
7453         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
7454         break;
7455       case Attribute::InaccessibleMemOnly:
7456         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
7457         break;
7458       case Attribute::ArgMemOnly:
7459         if (UseArgMemOnly)
7460           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
7461         else
7462           IRP.removeAttrs({Attribute::ArgMemOnly});
7463         break;
7464       case Attribute::InaccessibleMemOrArgMemOnly:
7465         if (UseArgMemOnly)
7466           State.addKnownBits(inverseLocation(
7467               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
7468         else
7469           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
7470         break;
7471       default:
7472         llvm_unreachable("Unexpected attribute!");
7473       }
7474     }
7475   }
7476 
7477   /// See AbstractAttribute::getDeducedAttributes(...).
7478   void getDeducedAttributes(LLVMContext &Ctx,
7479                             SmallVectorImpl<Attribute> &Attrs) const override {
7480     assert(Attrs.size() == 0);
7481     if (isAssumedReadNone()) {
7482       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
7483     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
7484       if (isAssumedInaccessibleMemOnly())
7485         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
7486       else if (isAssumedArgMemOnly())
7487         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
7488       else if (isAssumedInaccessibleOrArgMemOnly())
7489         Attrs.push_back(
7490             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
7491     }
7492     assert(Attrs.size() <= 1);
7493   }
7494 
7495   /// See AbstractAttribute::manifest(...).
7496   ChangeStatus manifest(Attributor &A) override {
7497     const IRPosition &IRP = getIRPosition();
7498 
7499     // Check if we would improve the existing attributes first.
7500     SmallVector<Attribute, 4> DeducedAttrs;
7501     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
7502     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
7503           return IRP.hasAttr(Attr.getKindAsEnum(),
7504                              /* IgnoreSubsumingPositions */ true);
7505         }))
7506       return ChangeStatus::UNCHANGED;
7507 
7508     // Clear existing attributes.
7509     IRP.removeAttrs(AttrKinds);
7510     if (isAssumedReadNone())
7511       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
7512 
7513     // Use the generic manifest method.
7514     return IRAttribute::manifest(A);
7515   }
7516 
7517   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
7518   bool checkForAllAccessesToMemoryKind(
7519       function_ref<bool(const Instruction *, const Value *, AccessKind,
7520                         MemoryLocationsKind)>
7521           Pred,
7522       MemoryLocationsKind RequestedMLK) const override {
7523     if (!isValidState())
7524       return false;
7525 
7526     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
7527     if (AssumedMLK == NO_LOCATIONS)
7528       return true;
7529 
7530     unsigned Idx = 0;
7531     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
7532          CurMLK *= 2, ++Idx) {
7533       if (CurMLK & RequestedMLK)
7534         continue;
7535 
7536       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
7537         for (const AccessInfo &AI : *Accesses)
7538           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
7539             return false;
7540     }
7541 
7542     return true;
7543   }
7544 
7545   ChangeStatus indicatePessimisticFixpoint() override {
7546     // If we give up and indicate a pessimistic fixpoint this instruction will
7547     // become an access for all potential access kinds:
7548     // TODO: Add pointers for argmemonly and globals to improve the results of
7549     //       checkForAllAccessesToMemoryKind.
7550     bool Changed = false;
7551     MemoryLocationsKind KnownMLK = getKnown();
7552     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
7553     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
7554       if (!(CurMLK & KnownMLK))
7555         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
7556                                   getAccessKindFromInst(I));
7557     return AAMemoryLocation::indicatePessimisticFixpoint();
7558   }
7559 
7560 protected:
7561   /// Helper struct to tie together an instruction that has a read or write
7562   /// effect with the pointer it accesses (if any).
7563   struct AccessInfo {
7564 
7565     /// The instruction that caused the access.
7566     const Instruction *I;
7567 
7568     /// The base pointer that is accessed, or null if unknown.
7569     const Value *Ptr;
7570 
7571     /// The kind of access (read/write/read+write).
7572     AccessKind Kind;
7573 
7574     bool operator==(const AccessInfo &RHS) const {
7575       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
7576     }
7577     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
7578       if (LHS.I != RHS.I)
7579         return LHS.I < RHS.I;
7580       if (LHS.Ptr != RHS.Ptr)
7581         return LHS.Ptr < RHS.Ptr;
7582       if (LHS.Kind != RHS.Kind)
7583         return LHS.Kind < RHS.Kind;
7584       return false;
7585     }
7586   };
7587 
7588   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
7589   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
7590   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
7591   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
7592 
7593   /// Categorize the pointer arguments of CB that might access memory in
7594   /// AccessedLoc and update the state and access map accordingly.
7595   void
7596   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
7597                                      AAMemoryLocation::StateType &AccessedLocs,
7598                                      bool &Changed);
7599 
7600   /// Return the kind(s) of location that may be accessed by \p V.
7601   AAMemoryLocation::MemoryLocationsKind
7602   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
7603 
7604   /// Return the access kind as determined by \p I.
7605   AccessKind getAccessKindFromInst(const Instruction *I) {
7606     AccessKind AK = READ_WRITE;
7607     if (I) {
7608       AK = I->mayReadFromMemory() ? READ : NONE;
7609       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
7610     }
7611     return AK;
7612   }
7613 
7614   /// Update the state \p State and the AccessKind2Accesses given that \p I is
7615   /// an access of kind \p AK to a \p MLK memory location with the access
7616   /// pointer \p Ptr.
7617   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
7618                                  MemoryLocationsKind MLK, const Instruction *I,
7619                                  const Value *Ptr, bool &Changed,
7620                                  AccessKind AK = READ_WRITE) {
7621 
7622     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
7623     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
7624     if (!Accesses)
7625       Accesses = new (Allocator) AccessSet();
7626     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
7627     State.removeAssumedBits(MLK);
7628   }
7629 
7630   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
7631   /// arguments, and update the state and access map accordingly.
7632   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
7633                           AAMemoryLocation::StateType &State, bool &Changed);
7634 
7635   /// Used to allocate access sets.
7636   BumpPtrAllocator &Allocator;
7637 
7638   /// The set of IR attributes AAMemoryLocation deals with.
7639   static const Attribute::AttrKind AttrKinds[4];
7640 };
7641 
7642 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
7643     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
7644     Attribute::InaccessibleMemOrArgMemOnly};
7645 
7646 void AAMemoryLocationImpl::categorizePtrValue(
7647     Attributor &A, const Instruction &I, const Value &Ptr,
7648     AAMemoryLocation::StateType &State, bool &Changed) {
7649   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
7650                     << Ptr << " ["
7651                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
7652 
7653   SmallVector<Value *, 8> Objects;
7654   if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, *this, &I)) {
7655     LLVM_DEBUG(
7656         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
7657     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
7658                               getAccessKindFromInst(&I));
7659     return;
7660   }
7661 
7662   for (Value *Obj : Objects) {
7663     // TODO: recognize the TBAA used for constant accesses.
7664     MemoryLocationsKind MLK = NO_LOCATIONS;
7665     assert(!isa<GEPOperator>(Obj) && "GEPs should have been stripped.");
7666     if (isa<UndefValue>(Obj))
7667       continue;
7668     if (isa<Argument>(Obj)) {
7669       // TODO: For now we do not treat byval arguments as local copies performed
7670       // on the call edge, though, we should. To make that happen we need to
7671       // teach various passes, e.g., DSE, about the copy effect of a byval. That
7672       // would also allow us to mark functions only accessing byval arguments as
7673       // readnone again, atguably their acceses have no effect outside of the
7674       // function, like accesses to allocas.
7675       MLK = NO_ARGUMENT_MEM;
7676     } else if (auto *GV = dyn_cast<GlobalValue>(Obj)) {
7677       // Reading constant memory is not treated as a read "effect" by the
7678       // function attr pass so we won't neither. Constants defined by TBAA are
7679       // similar. (We know we do not write it because it is constant.)
7680       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
7681         if (GVar->isConstant())
7682           continue;
7683 
7684       if (GV->hasLocalLinkage())
7685         MLK = NO_GLOBAL_INTERNAL_MEM;
7686       else
7687         MLK = NO_GLOBAL_EXTERNAL_MEM;
7688     } else if (isa<ConstantPointerNull>(Obj) &&
7689                !NullPointerIsDefined(getAssociatedFunction(),
7690                                      Ptr.getType()->getPointerAddressSpace())) {
7691       continue;
7692     } else if (isa<AllocaInst>(Obj)) {
7693       MLK = NO_LOCAL_MEM;
7694     } else if (const auto *CB = dyn_cast<CallBase>(Obj)) {
7695       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
7696           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
7697       if (NoAliasAA.isAssumedNoAlias())
7698         MLK = NO_MALLOCED_MEM;
7699       else
7700         MLK = NO_UNKOWN_MEM;
7701     } else {
7702       MLK = NO_UNKOWN_MEM;
7703     }
7704 
7705     assert(MLK != NO_LOCATIONS && "No location specified!");
7706     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value can be categorized: "
7707                       << *Obj << " -> " << getMemoryLocationsAsStr(MLK)
7708                       << "\n");
7709     updateStateAndAccessesMap(getState(), MLK, &I, Obj, Changed,
7710                               getAccessKindFromInst(&I));
7711   }
7712 
7713   LLVM_DEBUG(
7714       dbgs() << "[AAMemoryLocation] Accessed locations with pointer locations: "
7715              << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
7716 }
7717 
7718 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
7719     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
7720     bool &Changed) {
7721   for (unsigned ArgNo = 0, E = CB.arg_size(); ArgNo < E; ++ArgNo) {
7722 
7723     // Skip non-pointer arguments.
7724     const Value *ArgOp = CB.getArgOperand(ArgNo);
7725     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
7726       continue;
7727 
7728     // Skip readnone arguments.
7729     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
7730     const auto &ArgOpMemLocationAA =
7731         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
7732 
7733     if (ArgOpMemLocationAA.isAssumedReadNone())
7734       continue;
7735 
7736     // Categorize potentially accessed pointer arguments as if there was an
7737     // access instruction with them as pointer.
7738     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
7739   }
7740 }
7741 
7742 AAMemoryLocation::MemoryLocationsKind
7743 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
7744                                                   bool &Changed) {
7745   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
7746                     << I << "\n");
7747 
7748   AAMemoryLocation::StateType AccessedLocs;
7749   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
7750 
7751   if (auto *CB = dyn_cast<CallBase>(&I)) {
7752 
7753     // First check if we assume any memory is access is visible.
7754     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
7755         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
7756     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
7757                       << " [" << CBMemLocationAA << "]\n");
7758 
7759     if (CBMemLocationAA.isAssumedReadNone())
7760       return NO_LOCATIONS;
7761 
7762     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
7763       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
7764                                 Changed, getAccessKindFromInst(&I));
7765       return AccessedLocs.getAssumed();
7766     }
7767 
7768     uint32_t CBAssumedNotAccessedLocs =
7769         CBMemLocationAA.getAssumedNotAccessedLocation();
7770 
7771     // Set the argmemonly and global bit as we handle them separately below.
7772     uint32_t CBAssumedNotAccessedLocsNoArgMem =
7773         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
7774 
7775     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
7776       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
7777         continue;
7778       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
7779                                 getAccessKindFromInst(&I));
7780     }
7781 
7782     // Now handle global memory if it might be accessed. This is slightly tricky
7783     // as NO_GLOBAL_MEM has multiple bits set.
7784     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
7785     if (HasGlobalAccesses) {
7786       auto AccessPred = [&](const Instruction *, const Value *Ptr,
7787                             AccessKind Kind, MemoryLocationsKind MLK) {
7788         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
7789                                   getAccessKindFromInst(&I));
7790         return true;
7791       };
7792       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
7793               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
7794         return AccessedLocs.getWorstState();
7795     }
7796 
7797     LLVM_DEBUG(
7798         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
7799                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7800 
7801     // Now handle argument memory if it might be accessed.
7802     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
7803     if (HasArgAccesses)
7804       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
7805 
7806     LLVM_DEBUG(
7807         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
7808                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
7809 
7810     return AccessedLocs.getAssumed();
7811   }
7812 
7813   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
7814     LLVM_DEBUG(
7815         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
7816                << I << " [" << *Ptr << "]\n");
7817     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
7818     return AccessedLocs.getAssumed();
7819   }
7820 
7821   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
7822                     << I << "\n");
7823   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
7824                             getAccessKindFromInst(&I));
7825   return AccessedLocs.getAssumed();
7826 }
7827 
7828 /// An AA to represent the memory behavior function attributes.
7829 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
7830   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
7831       : AAMemoryLocationImpl(IRP, A) {}
7832 
7833   /// See AbstractAttribute::updateImpl(Attributor &A).
7834   virtual ChangeStatus updateImpl(Attributor &A) override {
7835 
7836     const auto &MemBehaviorAA =
7837         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
7838     if (MemBehaviorAA.isAssumedReadNone()) {
7839       if (MemBehaviorAA.isKnownReadNone())
7840         return indicateOptimisticFixpoint();
7841       assert(isAssumedReadNone() &&
7842              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
7843       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
7844       return ChangeStatus::UNCHANGED;
7845     }
7846 
7847     // The current assumed state used to determine a change.
7848     auto AssumedState = getAssumed();
7849     bool Changed = false;
7850 
7851     auto CheckRWInst = [&](Instruction &I) {
7852       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
7853       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
7854                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
7855       removeAssumedBits(inverseLocation(MLK, false, false));
7856       // Stop once only the valid bit set in the *not assumed location*, thus
7857       // once we don't actually exclude any memory locations in the state.
7858       return getAssumedNotAccessedLocation() != VALID_STATE;
7859     };
7860 
7861     bool UsedAssumedInformation = false;
7862     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this,
7863                                             UsedAssumedInformation))
7864       return indicatePessimisticFixpoint();
7865 
7866     Changed |= AssumedState != getAssumed();
7867     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7868   }
7869 
7870   /// See AbstractAttribute::trackStatistics()
7871   void trackStatistics() const override {
7872     if (isAssumedReadNone())
7873       STATS_DECLTRACK_FN_ATTR(readnone)
7874     else if (isAssumedArgMemOnly())
7875       STATS_DECLTRACK_FN_ATTR(argmemonly)
7876     else if (isAssumedInaccessibleMemOnly())
7877       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
7878     else if (isAssumedInaccessibleOrArgMemOnly())
7879       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
7880   }
7881 };
7882 
7883 /// AAMemoryLocation attribute for call sites.
7884 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
7885   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
7886       : AAMemoryLocationImpl(IRP, A) {}
7887 
7888   /// See AbstractAttribute::initialize(...).
7889   void initialize(Attributor &A) override {
7890     AAMemoryLocationImpl::initialize(A);
7891     Function *F = getAssociatedFunction();
7892     if (!F || F->isDeclaration())
7893       indicatePessimisticFixpoint();
7894   }
7895 
7896   /// See AbstractAttribute::updateImpl(...).
7897   ChangeStatus updateImpl(Attributor &A) override {
7898     // TODO: Once we have call site specific value information we can provide
7899     //       call site specific liveness liveness information and then it makes
7900     //       sense to specialize attributes for call sites arguments instead of
7901     //       redirecting requests to the callee argument.
7902     Function *F = getAssociatedFunction();
7903     const IRPosition &FnPos = IRPosition::function(*F);
7904     auto &FnAA =
7905         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7906     bool Changed = false;
7907     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7908                           AccessKind Kind, MemoryLocationsKind MLK) {
7909       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7910                                 getAccessKindFromInst(I));
7911       return true;
7912     };
7913     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7914       return indicatePessimisticFixpoint();
7915     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7916   }
7917 
7918   /// See AbstractAttribute::trackStatistics()
7919   void trackStatistics() const override {
7920     if (isAssumedReadNone())
7921       STATS_DECLTRACK_CS_ATTR(readnone)
7922   }
7923 };
7924 
7925 /// ------------------ Value Constant Range Attribute -------------------------
7926 
7927 struct AAValueConstantRangeImpl : AAValueConstantRange {
7928   using StateType = IntegerRangeState;
7929   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7930       : AAValueConstantRange(IRP, A) {}
7931 
7932   /// See AbstractAttribute::initialize(..).
7933   void initialize(Attributor &A) override {
7934     if (A.hasSimplificationCallback(getIRPosition())) {
7935       indicatePessimisticFixpoint();
7936       return;
7937     }
7938 
7939     // Intersect a range given by SCEV.
7940     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7941 
7942     // Intersect a range given by LVI.
7943     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7944   }
7945 
7946   /// See AbstractAttribute::getAsStr().
7947   const std::string getAsStr() const override {
7948     std::string Str;
7949     llvm::raw_string_ostream OS(Str);
7950     OS << "range(" << getBitWidth() << ")<";
7951     getKnown().print(OS);
7952     OS << " / ";
7953     getAssumed().print(OS);
7954     OS << ">";
7955     return OS.str();
7956   }
7957 
7958   /// Helper function to get a SCEV expr for the associated value at program
7959   /// point \p I.
7960   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7961     if (!getAnchorScope())
7962       return nullptr;
7963 
7964     ScalarEvolution *SE =
7965         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7966             *getAnchorScope());
7967 
7968     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7969         *getAnchorScope());
7970 
7971     if (!SE || !LI)
7972       return nullptr;
7973 
7974     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7975     if (!I)
7976       return S;
7977 
7978     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7979   }
7980 
7981   /// Helper function to get a range from SCEV for the associated value at
7982   /// program point \p I.
7983   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7984                                          const Instruction *I = nullptr) const {
7985     if (!getAnchorScope())
7986       return getWorstState(getBitWidth());
7987 
7988     ScalarEvolution *SE =
7989         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7990             *getAnchorScope());
7991 
7992     const SCEV *S = getSCEV(A, I);
7993     if (!SE || !S)
7994       return getWorstState(getBitWidth());
7995 
7996     return SE->getUnsignedRange(S);
7997   }
7998 
7999   /// Helper function to get a range from LVI for the associated value at
8000   /// program point \p I.
8001   ConstantRange
8002   getConstantRangeFromLVI(Attributor &A,
8003                           const Instruction *CtxI = nullptr) const {
8004     if (!getAnchorScope())
8005       return getWorstState(getBitWidth());
8006 
8007     LazyValueInfo *LVI =
8008         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
8009             *getAnchorScope());
8010 
8011     if (!LVI || !CtxI)
8012       return getWorstState(getBitWidth());
8013     return LVI->getConstantRange(&getAssociatedValue(),
8014                                  const_cast<Instruction *>(CtxI));
8015   }
8016 
8017   /// Return true if \p CtxI is valid for querying outside analyses.
8018   /// This basically makes sure we do not ask intra-procedural analysis
8019   /// about a context in the wrong function or a context that violates
8020   /// dominance assumptions they might have. The \p AllowAACtxI flag indicates
8021   /// if the original context of this AA is OK or should be considered invalid.
8022   bool isValidCtxInstructionForOutsideAnalysis(Attributor &A,
8023                                                const Instruction *CtxI,
8024                                                bool AllowAACtxI) const {
8025     if (!CtxI || (!AllowAACtxI && CtxI == getCtxI()))
8026       return false;
8027 
8028     // Our context might be in a different function, neither intra-procedural
8029     // analysis (ScalarEvolution nor LazyValueInfo) can handle that.
8030     if (!AA::isValidInScope(getAssociatedValue(), CtxI->getFunction()))
8031       return false;
8032 
8033     // If the context is not dominated by the value there are paths to the
8034     // context that do not define the value. This cannot be handled by
8035     // LazyValueInfo so we need to bail.
8036     if (auto *I = dyn_cast<Instruction>(&getAssociatedValue())) {
8037       InformationCache &InfoCache = A.getInfoCache();
8038       const DominatorTree *DT =
8039           InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
8040               *I->getFunction());
8041       return DT && DT->dominates(I, CtxI);
8042     }
8043 
8044     return true;
8045   }
8046 
8047   /// See AAValueConstantRange::getKnownConstantRange(..).
8048   ConstantRange
8049   getKnownConstantRange(Attributor &A,
8050                         const Instruction *CtxI = nullptr) const override {
8051     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8052                                                  /* AllowAACtxI */ false))
8053       return getKnown();
8054 
8055     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8056     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8057     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
8058   }
8059 
8060   /// See AAValueConstantRange::getAssumedConstantRange(..).
8061   ConstantRange
8062   getAssumedConstantRange(Attributor &A,
8063                           const Instruction *CtxI = nullptr) const override {
8064     // TODO: Make SCEV use Attributor assumption.
8065     //       We may be able to bound a variable range via assumptions in
8066     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
8067     //       evolve to x^2 + x, then we can say that y is in [2, 12].
8068     if (!isValidCtxInstructionForOutsideAnalysis(A, CtxI,
8069                                                  /* AllowAACtxI */ false))
8070       return getAssumed();
8071 
8072     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
8073     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
8074     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
8075   }
8076 
8077   /// Helper function to create MDNode for range metadata.
8078   static MDNode *
8079   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
8080                             const ConstantRange &AssumedConstantRange) {
8081     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
8082                                   Ty, AssumedConstantRange.getLower())),
8083                               ConstantAsMetadata::get(ConstantInt::get(
8084                                   Ty, AssumedConstantRange.getUpper()))};
8085     return MDNode::get(Ctx, LowAndHigh);
8086   }
8087 
8088   /// Return true if \p Assumed is included in \p KnownRanges.
8089   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
8090 
8091     if (Assumed.isFullSet())
8092       return false;
8093 
8094     if (!KnownRanges)
8095       return true;
8096 
8097     // If multiple ranges are annotated in IR, we give up to annotate assumed
8098     // range for now.
8099 
8100     // TODO:  If there exists a known range which containts assumed range, we
8101     // can say assumed range is better.
8102     if (KnownRanges->getNumOperands() > 2)
8103       return false;
8104 
8105     ConstantInt *Lower =
8106         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
8107     ConstantInt *Upper =
8108         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
8109 
8110     ConstantRange Known(Lower->getValue(), Upper->getValue());
8111     return Known.contains(Assumed) && Known != Assumed;
8112   }
8113 
8114   /// Helper function to set range metadata.
8115   static bool
8116   setRangeMetadataIfisBetterRange(Instruction *I,
8117                                   const ConstantRange &AssumedConstantRange) {
8118     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
8119     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
8120       if (!AssumedConstantRange.isEmptySet()) {
8121         I->setMetadata(LLVMContext::MD_range,
8122                        getMDNodeForConstantRange(I->getType(), I->getContext(),
8123                                                  AssumedConstantRange));
8124         return true;
8125       }
8126     }
8127     return false;
8128   }
8129 
8130   /// See AbstractAttribute::manifest()
8131   ChangeStatus manifest(Attributor &A) override {
8132     ChangeStatus Changed = ChangeStatus::UNCHANGED;
8133     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
8134     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
8135 
8136     auto &V = getAssociatedValue();
8137     if (!AssumedConstantRange.isEmptySet() &&
8138         !AssumedConstantRange.isSingleElement()) {
8139       if (Instruction *I = dyn_cast<Instruction>(&V)) {
8140         assert(I == getCtxI() && "Should not annotate an instruction which is "
8141                                  "not the context instruction");
8142         if (isa<CallInst>(I) || isa<LoadInst>(I))
8143           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
8144             Changed = ChangeStatus::CHANGED;
8145       }
8146     }
8147 
8148     return Changed;
8149   }
8150 };
8151 
8152 struct AAValueConstantRangeArgument final
8153     : AAArgumentFromCallSiteArguments<
8154           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8155           true /* BridgeCallBaseContext */> {
8156   using Base = AAArgumentFromCallSiteArguments<
8157       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
8158       true /* BridgeCallBaseContext */>;
8159   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
8160       : Base(IRP, A) {}
8161 
8162   /// See AbstractAttribute::initialize(..).
8163   void initialize(Attributor &A) override {
8164     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8165       indicatePessimisticFixpoint();
8166     } else {
8167       Base::initialize(A);
8168     }
8169   }
8170 
8171   /// See AbstractAttribute::trackStatistics()
8172   void trackStatistics() const override {
8173     STATS_DECLTRACK_ARG_ATTR(value_range)
8174   }
8175 };
8176 
8177 struct AAValueConstantRangeReturned
8178     : AAReturnedFromReturnedValues<AAValueConstantRange,
8179                                    AAValueConstantRangeImpl,
8180                                    AAValueConstantRangeImpl::StateType,
8181                                    /* PropogateCallBaseContext */ true> {
8182   using Base =
8183       AAReturnedFromReturnedValues<AAValueConstantRange,
8184                                    AAValueConstantRangeImpl,
8185                                    AAValueConstantRangeImpl::StateType,
8186                                    /* PropogateCallBaseContext */ true>;
8187   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
8188       : Base(IRP, A) {}
8189 
8190   /// See AbstractAttribute::initialize(...).
8191   void initialize(Attributor &A) override {}
8192 
8193   /// See AbstractAttribute::trackStatistics()
8194   void trackStatistics() const override {
8195     STATS_DECLTRACK_FNRET_ATTR(value_range)
8196   }
8197 };
8198 
8199 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
8200   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
8201       : AAValueConstantRangeImpl(IRP, A) {}
8202 
8203   /// See AbstractAttribute::initialize(...).
8204   void initialize(Attributor &A) override {
8205     AAValueConstantRangeImpl::initialize(A);
8206     if (isAtFixpoint())
8207       return;
8208 
8209     Value &V = getAssociatedValue();
8210 
8211     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8212       unionAssumed(ConstantRange(C->getValue()));
8213       indicateOptimisticFixpoint();
8214       return;
8215     }
8216 
8217     if (isa<UndefValue>(&V)) {
8218       // Collapse the undef state to 0.
8219       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
8220       indicateOptimisticFixpoint();
8221       return;
8222     }
8223 
8224     if (isa<CallBase>(&V))
8225       return;
8226 
8227     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
8228       return;
8229 
8230     // If it is a load instruction with range metadata, use it.
8231     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
8232       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
8233         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8234         return;
8235       }
8236 
8237     // We can work with PHI and select instruction as we traverse their operands
8238     // during update.
8239     if (isa<SelectInst>(V) || isa<PHINode>(V))
8240       return;
8241 
8242     // Otherwise we give up.
8243     indicatePessimisticFixpoint();
8244 
8245     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
8246                       << getAssociatedValue() << "\n");
8247   }
8248 
8249   bool calculateBinaryOperator(
8250       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
8251       const Instruction *CtxI,
8252       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8253     Value *LHS = BinOp->getOperand(0);
8254     Value *RHS = BinOp->getOperand(1);
8255 
8256     // Simplify the operands first.
8257     bool UsedAssumedInformation = false;
8258     const auto &SimplifiedLHS =
8259         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8260                                *this, UsedAssumedInformation);
8261     if (!SimplifiedLHS.hasValue())
8262       return true;
8263     if (!SimplifiedLHS.getValue())
8264       return false;
8265     LHS = *SimplifiedLHS;
8266 
8267     const auto &SimplifiedRHS =
8268         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8269                                *this, UsedAssumedInformation);
8270     if (!SimplifiedRHS.hasValue())
8271       return true;
8272     if (!SimplifiedRHS.getValue())
8273       return false;
8274     RHS = *SimplifiedRHS;
8275 
8276     // TODO: Allow non integers as well.
8277     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8278       return false;
8279 
8280     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8281         *this, IRPosition::value(*LHS, getCallBaseContext()),
8282         DepClassTy::REQUIRED);
8283     QuerriedAAs.push_back(&LHSAA);
8284     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8285 
8286     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8287         *this, IRPosition::value(*RHS, getCallBaseContext()),
8288         DepClassTy::REQUIRED);
8289     QuerriedAAs.push_back(&RHSAA);
8290     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8291 
8292     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
8293 
8294     T.unionAssumed(AssumedRange);
8295 
8296     // TODO: Track a known state too.
8297 
8298     return T.isValidState();
8299   }
8300 
8301   bool calculateCastInst(
8302       Attributor &A, CastInst *CastI, IntegerRangeState &T,
8303       const Instruction *CtxI,
8304       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8305     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
8306     // TODO: Allow non integers as well.
8307     Value *OpV = CastI->getOperand(0);
8308 
8309     // Simplify the operand first.
8310     bool UsedAssumedInformation = false;
8311     const auto &SimplifiedOpV =
8312         A.getAssumedSimplified(IRPosition::value(*OpV, getCallBaseContext()),
8313                                *this, UsedAssumedInformation);
8314     if (!SimplifiedOpV.hasValue())
8315       return true;
8316     if (!SimplifiedOpV.getValue())
8317       return false;
8318     OpV = *SimplifiedOpV;
8319 
8320     if (!OpV->getType()->isIntegerTy())
8321       return false;
8322 
8323     auto &OpAA = A.getAAFor<AAValueConstantRange>(
8324         *this, IRPosition::value(*OpV, getCallBaseContext()),
8325         DepClassTy::REQUIRED);
8326     QuerriedAAs.push_back(&OpAA);
8327     T.unionAssumed(
8328         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
8329     return T.isValidState();
8330   }
8331 
8332   bool
8333   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
8334                    const Instruction *CtxI,
8335                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
8336     Value *LHS = CmpI->getOperand(0);
8337     Value *RHS = CmpI->getOperand(1);
8338 
8339     // Simplify the operands first.
8340     bool UsedAssumedInformation = false;
8341     const auto &SimplifiedLHS =
8342         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8343                                *this, UsedAssumedInformation);
8344     if (!SimplifiedLHS.hasValue())
8345       return true;
8346     if (!SimplifiedLHS.getValue())
8347       return false;
8348     LHS = *SimplifiedLHS;
8349 
8350     const auto &SimplifiedRHS =
8351         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8352                                *this, UsedAssumedInformation);
8353     if (!SimplifiedRHS.hasValue())
8354       return true;
8355     if (!SimplifiedRHS.getValue())
8356       return false;
8357     RHS = *SimplifiedRHS;
8358 
8359     // TODO: Allow non integers as well.
8360     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8361       return false;
8362 
8363     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
8364         *this, IRPosition::value(*LHS, getCallBaseContext()),
8365         DepClassTy::REQUIRED);
8366     QuerriedAAs.push_back(&LHSAA);
8367     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
8368         *this, IRPosition::value(*RHS, getCallBaseContext()),
8369         DepClassTy::REQUIRED);
8370     QuerriedAAs.push_back(&RHSAA);
8371     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
8372     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
8373 
8374     // If one of them is empty set, we can't decide.
8375     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
8376       return true;
8377 
8378     bool MustTrue = false, MustFalse = false;
8379 
8380     auto AllowedRegion =
8381         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
8382 
8383     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
8384       MustFalse = true;
8385 
8386     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
8387       MustTrue = true;
8388 
8389     assert((!MustTrue || !MustFalse) &&
8390            "Either MustTrue or MustFalse should be false!");
8391 
8392     if (MustTrue)
8393       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
8394     else if (MustFalse)
8395       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
8396     else
8397       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
8398 
8399     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
8400                       << " " << RHSAA << "\n");
8401 
8402     // TODO: Track a known state too.
8403     return T.isValidState();
8404   }
8405 
8406   /// See AbstractAttribute::updateImpl(...).
8407   ChangeStatus updateImpl(Attributor &A) override {
8408     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8409                             IntegerRangeState &T, bool Stripped) -> bool {
8410       Instruction *I = dyn_cast<Instruction>(&V);
8411       if (!I || isa<CallBase>(I)) {
8412 
8413         // Simplify the operand first.
8414         bool UsedAssumedInformation = false;
8415         const auto &SimplifiedOpV =
8416             A.getAssumedSimplified(IRPosition::value(V, getCallBaseContext()),
8417                                    *this, UsedAssumedInformation);
8418         if (!SimplifiedOpV.hasValue())
8419           return true;
8420         if (!SimplifiedOpV.getValue())
8421           return false;
8422         Value *VPtr = *SimplifiedOpV;
8423 
8424         // If the value is not instruction, we query AA to Attributor.
8425         const auto &AA = A.getAAFor<AAValueConstantRange>(
8426             *this, IRPosition::value(*VPtr, getCallBaseContext()),
8427             DepClassTy::REQUIRED);
8428 
8429         // Clamp operator is not used to utilize a program point CtxI.
8430         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
8431 
8432         return T.isValidState();
8433       }
8434 
8435       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
8436       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
8437         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
8438           return false;
8439       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
8440         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
8441           return false;
8442       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
8443         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
8444           return false;
8445       } else {
8446         // Give up with other instructions.
8447         // TODO: Add other instructions
8448 
8449         T.indicatePessimisticFixpoint();
8450         return false;
8451       }
8452 
8453       // Catch circular reasoning in a pessimistic way for now.
8454       // TODO: Check how the range evolves and if we stripped anything, see also
8455       //       AADereferenceable or AAAlign for similar situations.
8456       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
8457         if (QueriedAA != this)
8458           continue;
8459         // If we are in a stady state we do not need to worry.
8460         if (T.getAssumed() == getState().getAssumed())
8461           continue;
8462         T.indicatePessimisticFixpoint();
8463       }
8464 
8465       return T.isValidState();
8466     };
8467 
8468     IntegerRangeState T(getBitWidth());
8469 
8470     if (!genericValueTraversal<IntegerRangeState>(A, getIRPosition(), *this, T,
8471                                                   VisitValueCB, getCtxI(),
8472                                                   /* UseValueSimplify */ false))
8473       return indicatePessimisticFixpoint();
8474 
8475     return clampStateAndIndicateChange(getState(), T);
8476   }
8477 
8478   /// See AbstractAttribute::trackStatistics()
8479   void trackStatistics() const override {
8480     STATS_DECLTRACK_FLOATING_ATTR(value_range)
8481   }
8482 };
8483 
8484 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
8485   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
8486       : AAValueConstantRangeImpl(IRP, A) {}
8487 
8488   /// See AbstractAttribute::initialize(...).
8489   ChangeStatus updateImpl(Attributor &A) override {
8490     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
8491                      "not be called");
8492   }
8493 
8494   /// See AbstractAttribute::trackStatistics()
8495   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
8496 };
8497 
8498 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
8499   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
8500       : AAValueConstantRangeFunction(IRP, A) {}
8501 
8502   /// See AbstractAttribute::trackStatistics()
8503   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
8504 };
8505 
8506 struct AAValueConstantRangeCallSiteReturned
8507     : AACallSiteReturnedFromReturned<AAValueConstantRange,
8508                                      AAValueConstantRangeImpl,
8509                                      AAValueConstantRangeImpl::StateType,
8510                                      /* IntroduceCallBaseContext */ true> {
8511   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
8512       : AACallSiteReturnedFromReturned<AAValueConstantRange,
8513                                        AAValueConstantRangeImpl,
8514                                        AAValueConstantRangeImpl::StateType,
8515                                        /* IntroduceCallBaseContext */ true>(IRP,
8516                                                                             A) {
8517   }
8518 
8519   /// See AbstractAttribute::initialize(...).
8520   void initialize(Attributor &A) override {
8521     // If it is a load instruction with range metadata, use the metadata.
8522     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
8523       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
8524         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
8525 
8526     AAValueConstantRangeImpl::initialize(A);
8527   }
8528 
8529   /// See AbstractAttribute::trackStatistics()
8530   void trackStatistics() const override {
8531     STATS_DECLTRACK_CSRET_ATTR(value_range)
8532   }
8533 };
8534 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
8535   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
8536       : AAValueConstantRangeFloating(IRP, A) {}
8537 
8538   /// See AbstractAttribute::manifest()
8539   ChangeStatus manifest(Attributor &A) override {
8540     return ChangeStatus::UNCHANGED;
8541   }
8542 
8543   /// See AbstractAttribute::trackStatistics()
8544   void trackStatistics() const override {
8545     STATS_DECLTRACK_CSARG_ATTR(value_range)
8546   }
8547 };
8548 
8549 /// ------------------ Potential Values Attribute -------------------------
8550 
8551 struct AAPotentialValuesImpl : AAPotentialValues {
8552   using StateType = PotentialConstantIntValuesState;
8553 
8554   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
8555       : AAPotentialValues(IRP, A) {}
8556 
8557   /// See AbstractAttribute::initialize(..).
8558   void initialize(Attributor &A) override {
8559     if (A.hasSimplificationCallback(getIRPosition()))
8560       indicatePessimisticFixpoint();
8561     else
8562       AAPotentialValues::initialize(A);
8563   }
8564 
8565   /// See AbstractAttribute::getAsStr().
8566   const std::string getAsStr() const override {
8567     std::string Str;
8568     llvm::raw_string_ostream OS(Str);
8569     OS << getState();
8570     return OS.str();
8571   }
8572 
8573   /// See AbstractAttribute::updateImpl(...).
8574   ChangeStatus updateImpl(Attributor &A) override {
8575     return indicatePessimisticFixpoint();
8576   }
8577 };
8578 
8579 struct AAPotentialValuesArgument final
8580     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8581                                       PotentialConstantIntValuesState> {
8582   using Base =
8583       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
8584                                       PotentialConstantIntValuesState>;
8585   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
8586       : Base(IRP, A) {}
8587 
8588   /// See AbstractAttribute::initialize(..).
8589   void initialize(Attributor &A) override {
8590     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
8591       indicatePessimisticFixpoint();
8592     } else {
8593       Base::initialize(A);
8594     }
8595   }
8596 
8597   /// See AbstractAttribute::trackStatistics()
8598   void trackStatistics() const override {
8599     STATS_DECLTRACK_ARG_ATTR(potential_values)
8600   }
8601 };
8602 
8603 struct AAPotentialValuesReturned
8604     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
8605   using Base =
8606       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
8607   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
8608       : Base(IRP, A) {}
8609 
8610   /// See AbstractAttribute::trackStatistics()
8611   void trackStatistics() const override {
8612     STATS_DECLTRACK_FNRET_ATTR(potential_values)
8613   }
8614 };
8615 
8616 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
8617   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
8618       : AAPotentialValuesImpl(IRP, A) {}
8619 
8620   /// See AbstractAttribute::initialize(..).
8621   void initialize(Attributor &A) override {
8622     AAPotentialValuesImpl::initialize(A);
8623     if (isAtFixpoint())
8624       return;
8625 
8626     Value &V = getAssociatedValue();
8627 
8628     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8629       unionAssumed(C->getValue());
8630       indicateOptimisticFixpoint();
8631       return;
8632     }
8633 
8634     if (isa<UndefValue>(&V)) {
8635       unionAssumedWithUndef();
8636       indicateOptimisticFixpoint();
8637       return;
8638     }
8639 
8640     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
8641       return;
8642 
8643     if (isa<SelectInst>(V) || isa<PHINode>(V) || isa<LoadInst>(V))
8644       return;
8645 
8646     indicatePessimisticFixpoint();
8647 
8648     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
8649                       << getAssociatedValue() << "\n");
8650   }
8651 
8652   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
8653                                 const APInt &RHS) {
8654     ICmpInst::Predicate Pred = ICI->getPredicate();
8655     switch (Pred) {
8656     case ICmpInst::ICMP_UGT:
8657       return LHS.ugt(RHS);
8658     case ICmpInst::ICMP_SGT:
8659       return LHS.sgt(RHS);
8660     case ICmpInst::ICMP_EQ:
8661       return LHS.eq(RHS);
8662     case ICmpInst::ICMP_UGE:
8663       return LHS.uge(RHS);
8664     case ICmpInst::ICMP_SGE:
8665       return LHS.sge(RHS);
8666     case ICmpInst::ICMP_ULT:
8667       return LHS.ult(RHS);
8668     case ICmpInst::ICMP_SLT:
8669       return LHS.slt(RHS);
8670     case ICmpInst::ICMP_NE:
8671       return LHS.ne(RHS);
8672     case ICmpInst::ICMP_ULE:
8673       return LHS.ule(RHS);
8674     case ICmpInst::ICMP_SLE:
8675       return LHS.sle(RHS);
8676     default:
8677       llvm_unreachable("Invalid ICmp predicate!");
8678     }
8679   }
8680 
8681   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
8682                                  uint32_t ResultBitWidth) {
8683     Instruction::CastOps CastOp = CI->getOpcode();
8684     switch (CastOp) {
8685     default:
8686       llvm_unreachable("unsupported or not integer cast");
8687     case Instruction::Trunc:
8688       return Src.trunc(ResultBitWidth);
8689     case Instruction::SExt:
8690       return Src.sext(ResultBitWidth);
8691     case Instruction::ZExt:
8692       return Src.zext(ResultBitWidth);
8693     case Instruction::BitCast:
8694       return Src;
8695     }
8696   }
8697 
8698   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
8699                                        const APInt &LHS, const APInt &RHS,
8700                                        bool &SkipOperation, bool &Unsupported) {
8701     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
8702     // Unsupported is set to true when the binary operator is not supported.
8703     // SkipOperation is set to true when UB occur with the given operand pair
8704     // (LHS, RHS).
8705     // TODO: we should look at nsw and nuw keywords to handle operations
8706     //       that create poison or undef value.
8707     switch (BinOpcode) {
8708     default:
8709       Unsupported = true;
8710       return LHS;
8711     case Instruction::Add:
8712       return LHS + RHS;
8713     case Instruction::Sub:
8714       return LHS - RHS;
8715     case Instruction::Mul:
8716       return LHS * RHS;
8717     case Instruction::UDiv:
8718       if (RHS.isZero()) {
8719         SkipOperation = true;
8720         return LHS;
8721       }
8722       return LHS.udiv(RHS);
8723     case Instruction::SDiv:
8724       if (RHS.isZero()) {
8725         SkipOperation = true;
8726         return LHS;
8727       }
8728       return LHS.sdiv(RHS);
8729     case Instruction::URem:
8730       if (RHS.isZero()) {
8731         SkipOperation = true;
8732         return LHS;
8733       }
8734       return LHS.urem(RHS);
8735     case Instruction::SRem:
8736       if (RHS.isZero()) {
8737         SkipOperation = true;
8738         return LHS;
8739       }
8740       return LHS.srem(RHS);
8741     case Instruction::Shl:
8742       return LHS.shl(RHS);
8743     case Instruction::LShr:
8744       return LHS.lshr(RHS);
8745     case Instruction::AShr:
8746       return LHS.ashr(RHS);
8747     case Instruction::And:
8748       return LHS & RHS;
8749     case Instruction::Or:
8750       return LHS | RHS;
8751     case Instruction::Xor:
8752       return LHS ^ RHS;
8753     }
8754   }
8755 
8756   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
8757                                            const APInt &LHS, const APInt &RHS) {
8758     bool SkipOperation = false;
8759     bool Unsupported = false;
8760     APInt Result =
8761         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
8762     if (Unsupported)
8763       return false;
8764     // If SkipOperation is true, we can ignore this operand pair (L, R).
8765     if (!SkipOperation)
8766       unionAssumed(Result);
8767     return isValidState();
8768   }
8769 
8770   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
8771     auto AssumedBefore = getAssumed();
8772     Value *LHS = ICI->getOperand(0);
8773     Value *RHS = ICI->getOperand(1);
8774 
8775     // Simplify the operands first.
8776     bool UsedAssumedInformation = false;
8777     const auto &SimplifiedLHS =
8778         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8779                                *this, UsedAssumedInformation);
8780     if (!SimplifiedLHS.hasValue())
8781       return ChangeStatus::UNCHANGED;
8782     if (!SimplifiedLHS.getValue())
8783       return indicatePessimisticFixpoint();
8784     LHS = *SimplifiedLHS;
8785 
8786     const auto &SimplifiedRHS =
8787         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8788                                *this, UsedAssumedInformation);
8789     if (!SimplifiedRHS.hasValue())
8790       return ChangeStatus::UNCHANGED;
8791     if (!SimplifiedRHS.getValue())
8792       return indicatePessimisticFixpoint();
8793     RHS = *SimplifiedRHS;
8794 
8795     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8796       return indicatePessimisticFixpoint();
8797 
8798     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8799                                                 DepClassTy::REQUIRED);
8800     if (!LHSAA.isValidState())
8801       return indicatePessimisticFixpoint();
8802 
8803     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8804                                                 DepClassTy::REQUIRED);
8805     if (!RHSAA.isValidState())
8806       return indicatePessimisticFixpoint();
8807 
8808     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
8809     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
8810 
8811     // TODO: make use of undef flag to limit potential values aggressively.
8812     bool MaybeTrue = false, MaybeFalse = false;
8813     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
8814     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
8815       // The result of any comparison between undefs can be soundly replaced
8816       // with undef.
8817       unionAssumedWithUndef();
8818     } else if (LHSAA.undefIsContained()) {
8819       for (const APInt &R : RHSAAPVS) {
8820         bool CmpResult = calculateICmpInst(ICI, Zero, R);
8821         MaybeTrue |= CmpResult;
8822         MaybeFalse |= !CmpResult;
8823         if (MaybeTrue & MaybeFalse)
8824           return indicatePessimisticFixpoint();
8825       }
8826     } else if (RHSAA.undefIsContained()) {
8827       for (const APInt &L : LHSAAPVS) {
8828         bool CmpResult = calculateICmpInst(ICI, L, Zero);
8829         MaybeTrue |= CmpResult;
8830         MaybeFalse |= !CmpResult;
8831         if (MaybeTrue & MaybeFalse)
8832           return indicatePessimisticFixpoint();
8833       }
8834     } else {
8835       for (const APInt &L : LHSAAPVS) {
8836         for (const APInt &R : RHSAAPVS) {
8837           bool CmpResult = calculateICmpInst(ICI, L, R);
8838           MaybeTrue |= CmpResult;
8839           MaybeFalse |= !CmpResult;
8840           if (MaybeTrue & MaybeFalse)
8841             return indicatePessimisticFixpoint();
8842         }
8843       }
8844     }
8845     if (MaybeTrue)
8846       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
8847     if (MaybeFalse)
8848       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
8849     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8850                                          : ChangeStatus::CHANGED;
8851   }
8852 
8853   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
8854     auto AssumedBefore = getAssumed();
8855     Value *LHS = SI->getTrueValue();
8856     Value *RHS = SI->getFalseValue();
8857 
8858     // Simplify the operands first.
8859     bool UsedAssumedInformation = false;
8860     const auto &SimplifiedLHS =
8861         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8862                                *this, UsedAssumedInformation);
8863     if (!SimplifiedLHS.hasValue())
8864       return ChangeStatus::UNCHANGED;
8865     if (!SimplifiedLHS.getValue())
8866       return indicatePessimisticFixpoint();
8867     LHS = *SimplifiedLHS;
8868 
8869     const auto &SimplifiedRHS =
8870         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8871                                *this, UsedAssumedInformation);
8872     if (!SimplifiedRHS.hasValue())
8873       return ChangeStatus::UNCHANGED;
8874     if (!SimplifiedRHS.getValue())
8875       return indicatePessimisticFixpoint();
8876     RHS = *SimplifiedRHS;
8877 
8878     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8879       return indicatePessimisticFixpoint();
8880 
8881     Optional<Constant *> C = A.getAssumedConstant(*SI->getCondition(), *this,
8882                                                   UsedAssumedInformation);
8883 
8884     // Check if we only need one operand.
8885     bool OnlyLeft = false, OnlyRight = false;
8886     if (C.hasValue() && *C && (*C)->isOneValue())
8887       OnlyLeft = true;
8888     else if (C.hasValue() && *C && (*C)->isZeroValue())
8889       OnlyRight = true;
8890 
8891     const AAPotentialValues *LHSAA = nullptr, *RHSAA = nullptr;
8892     if (!OnlyRight) {
8893       LHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8894                                              DepClassTy::REQUIRED);
8895       if (!LHSAA->isValidState())
8896         return indicatePessimisticFixpoint();
8897     }
8898     if (!OnlyLeft) {
8899       RHSAA = &A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8900                                              DepClassTy::REQUIRED);
8901       if (!RHSAA->isValidState())
8902         return indicatePessimisticFixpoint();
8903     }
8904 
8905     if (!LHSAA || !RHSAA) {
8906       // select (true/false), lhs, rhs
8907       auto *OpAA = LHSAA ? LHSAA : RHSAA;
8908 
8909       if (OpAA->undefIsContained())
8910         unionAssumedWithUndef();
8911       else
8912         unionAssumed(*OpAA);
8913 
8914     } else if (LHSAA->undefIsContained() && RHSAA->undefIsContained()) {
8915       // select i1 *, undef , undef => undef
8916       unionAssumedWithUndef();
8917     } else {
8918       unionAssumed(*LHSAA);
8919       unionAssumed(*RHSAA);
8920     }
8921     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8922                                          : ChangeStatus::CHANGED;
8923   }
8924 
8925   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
8926     auto AssumedBefore = getAssumed();
8927     if (!CI->isIntegerCast())
8928       return indicatePessimisticFixpoint();
8929     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
8930     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
8931     Value *Src = CI->getOperand(0);
8932 
8933     // Simplify the operand first.
8934     bool UsedAssumedInformation = false;
8935     const auto &SimplifiedSrc =
8936         A.getAssumedSimplified(IRPosition::value(*Src, getCallBaseContext()),
8937                                *this, UsedAssumedInformation);
8938     if (!SimplifiedSrc.hasValue())
8939       return ChangeStatus::UNCHANGED;
8940     if (!SimplifiedSrc.getValue())
8941       return indicatePessimisticFixpoint();
8942     Src = *SimplifiedSrc;
8943 
8944     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
8945                                                 DepClassTy::REQUIRED);
8946     if (!SrcAA.isValidState())
8947       return indicatePessimisticFixpoint();
8948     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
8949     if (SrcAA.undefIsContained())
8950       unionAssumedWithUndef();
8951     else {
8952       for (const APInt &S : SrcAAPVS) {
8953         APInt T = calculateCastInst(CI, S, ResultBitWidth);
8954         unionAssumed(T);
8955       }
8956     }
8957     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8958                                          : ChangeStatus::CHANGED;
8959   }
8960 
8961   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
8962     auto AssumedBefore = getAssumed();
8963     Value *LHS = BinOp->getOperand(0);
8964     Value *RHS = BinOp->getOperand(1);
8965 
8966     // Simplify the operands first.
8967     bool UsedAssumedInformation = false;
8968     const auto &SimplifiedLHS =
8969         A.getAssumedSimplified(IRPosition::value(*LHS, getCallBaseContext()),
8970                                *this, UsedAssumedInformation);
8971     if (!SimplifiedLHS.hasValue())
8972       return ChangeStatus::UNCHANGED;
8973     if (!SimplifiedLHS.getValue())
8974       return indicatePessimisticFixpoint();
8975     LHS = *SimplifiedLHS;
8976 
8977     const auto &SimplifiedRHS =
8978         A.getAssumedSimplified(IRPosition::value(*RHS, getCallBaseContext()),
8979                                *this, UsedAssumedInformation);
8980     if (!SimplifiedRHS.hasValue())
8981       return ChangeStatus::UNCHANGED;
8982     if (!SimplifiedRHS.getValue())
8983       return indicatePessimisticFixpoint();
8984     RHS = *SimplifiedRHS;
8985 
8986     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
8987       return indicatePessimisticFixpoint();
8988 
8989     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
8990                                                 DepClassTy::REQUIRED);
8991     if (!LHSAA.isValidState())
8992       return indicatePessimisticFixpoint();
8993 
8994     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
8995                                                 DepClassTy::REQUIRED);
8996     if (!RHSAA.isValidState())
8997       return indicatePessimisticFixpoint();
8998 
8999     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
9000     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
9001     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
9002 
9003     // TODO: make use of undef flag to limit potential values aggressively.
9004     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
9005       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
9006         return indicatePessimisticFixpoint();
9007     } else if (LHSAA.undefIsContained()) {
9008       for (const APInt &R : RHSAAPVS) {
9009         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
9010           return indicatePessimisticFixpoint();
9011       }
9012     } else if (RHSAA.undefIsContained()) {
9013       for (const APInt &L : LHSAAPVS) {
9014         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
9015           return indicatePessimisticFixpoint();
9016       }
9017     } else {
9018       for (const APInt &L : LHSAAPVS) {
9019         for (const APInt &R : RHSAAPVS) {
9020           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
9021             return indicatePessimisticFixpoint();
9022         }
9023       }
9024     }
9025     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9026                                          : ChangeStatus::CHANGED;
9027   }
9028 
9029   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
9030     auto AssumedBefore = getAssumed();
9031     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
9032       Value *IncomingValue = PHI->getIncomingValue(u);
9033 
9034       // Simplify the operand first.
9035       bool UsedAssumedInformation = false;
9036       const auto &SimplifiedIncomingValue = A.getAssumedSimplified(
9037           IRPosition::value(*IncomingValue, getCallBaseContext()), *this,
9038           UsedAssumedInformation);
9039       if (!SimplifiedIncomingValue.hasValue())
9040         continue;
9041       if (!SimplifiedIncomingValue.getValue())
9042         return indicatePessimisticFixpoint();
9043       IncomingValue = *SimplifiedIncomingValue;
9044 
9045       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
9046           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
9047       if (!PotentialValuesAA.isValidState())
9048         return indicatePessimisticFixpoint();
9049       if (PotentialValuesAA.undefIsContained())
9050         unionAssumedWithUndef();
9051       else
9052         unionAssumed(PotentialValuesAA.getAssumed());
9053     }
9054     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9055                                          : ChangeStatus::CHANGED;
9056   }
9057 
9058   ChangeStatus updateWithLoad(Attributor &A, LoadInst &L) {
9059     if (!L.getType()->isIntegerTy())
9060       return indicatePessimisticFixpoint();
9061 
9062     auto Union = [&](Value &V) {
9063       if (isa<UndefValue>(V)) {
9064         unionAssumedWithUndef();
9065         return true;
9066       }
9067       if (ConstantInt *CI = dyn_cast<ConstantInt>(&V)) {
9068         unionAssumed(CI->getValue());
9069         return true;
9070       }
9071       return false;
9072     };
9073     auto AssumedBefore = getAssumed();
9074 
9075     if (!AAValueSimplifyImpl::handleLoad(A, *this, L, Union))
9076       return indicatePessimisticFixpoint();
9077 
9078     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9079                                          : ChangeStatus::CHANGED;
9080   }
9081 
9082   /// See AbstractAttribute::updateImpl(...).
9083   ChangeStatus updateImpl(Attributor &A) override {
9084     Value &V = getAssociatedValue();
9085     Instruction *I = dyn_cast<Instruction>(&V);
9086 
9087     if (auto *ICI = dyn_cast<ICmpInst>(I))
9088       return updateWithICmpInst(A, ICI);
9089 
9090     if (auto *SI = dyn_cast<SelectInst>(I))
9091       return updateWithSelectInst(A, SI);
9092 
9093     if (auto *CI = dyn_cast<CastInst>(I))
9094       return updateWithCastInst(A, CI);
9095 
9096     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
9097       return updateWithBinaryOperator(A, BinOp);
9098 
9099     if (auto *PHI = dyn_cast<PHINode>(I))
9100       return updateWithPHINode(A, PHI);
9101 
9102     if (auto *L = dyn_cast<LoadInst>(I))
9103       return updateWithLoad(A, *L);
9104 
9105     return indicatePessimisticFixpoint();
9106   }
9107 
9108   /// See AbstractAttribute::trackStatistics()
9109   void trackStatistics() const override {
9110     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
9111   }
9112 };
9113 
9114 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
9115   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
9116       : AAPotentialValuesImpl(IRP, A) {}
9117 
9118   /// See AbstractAttribute::initialize(...).
9119   ChangeStatus updateImpl(Attributor &A) override {
9120     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
9121                      "not be called");
9122   }
9123 
9124   /// See AbstractAttribute::trackStatistics()
9125   void trackStatistics() const override {
9126     STATS_DECLTRACK_FN_ATTR(potential_values)
9127   }
9128 };
9129 
9130 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
9131   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
9132       : AAPotentialValuesFunction(IRP, A) {}
9133 
9134   /// See AbstractAttribute::trackStatistics()
9135   void trackStatistics() const override {
9136     STATS_DECLTRACK_CS_ATTR(potential_values)
9137   }
9138 };
9139 
9140 struct AAPotentialValuesCallSiteReturned
9141     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
9142   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
9143       : AACallSiteReturnedFromReturned<AAPotentialValues,
9144                                        AAPotentialValuesImpl>(IRP, A) {}
9145 
9146   /// See AbstractAttribute::trackStatistics()
9147   void trackStatistics() const override {
9148     STATS_DECLTRACK_CSRET_ATTR(potential_values)
9149   }
9150 };
9151 
9152 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
9153   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
9154       : AAPotentialValuesFloating(IRP, A) {}
9155 
9156   /// See AbstractAttribute::initialize(..).
9157   void initialize(Attributor &A) override {
9158     AAPotentialValuesImpl::initialize(A);
9159     if (isAtFixpoint())
9160       return;
9161 
9162     Value &V = getAssociatedValue();
9163 
9164     if (auto *C = dyn_cast<ConstantInt>(&V)) {
9165       unionAssumed(C->getValue());
9166       indicateOptimisticFixpoint();
9167       return;
9168     }
9169 
9170     if (isa<UndefValue>(&V)) {
9171       unionAssumedWithUndef();
9172       indicateOptimisticFixpoint();
9173       return;
9174     }
9175   }
9176 
9177   /// See AbstractAttribute::updateImpl(...).
9178   ChangeStatus updateImpl(Attributor &A) override {
9179     Value &V = getAssociatedValue();
9180     auto AssumedBefore = getAssumed();
9181     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
9182                                              DepClassTy::REQUIRED);
9183     const auto &S = AA.getAssumed();
9184     unionAssumed(S);
9185     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
9186                                          : ChangeStatus::CHANGED;
9187   }
9188 
9189   /// See AbstractAttribute::trackStatistics()
9190   void trackStatistics() const override {
9191     STATS_DECLTRACK_CSARG_ATTR(potential_values)
9192   }
9193 };
9194 
9195 /// ------------------------ NoUndef Attribute ---------------------------------
9196 struct AANoUndefImpl : AANoUndef {
9197   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
9198 
9199   /// See AbstractAttribute::initialize(...).
9200   void initialize(Attributor &A) override {
9201     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
9202       indicateOptimisticFixpoint();
9203       return;
9204     }
9205     Value &V = getAssociatedValue();
9206     if (isa<UndefValue>(V))
9207       indicatePessimisticFixpoint();
9208     else if (isa<FreezeInst>(V))
9209       indicateOptimisticFixpoint();
9210     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
9211              isGuaranteedNotToBeUndefOrPoison(&V))
9212       indicateOptimisticFixpoint();
9213     else
9214       AANoUndef::initialize(A);
9215   }
9216 
9217   /// See followUsesInMBEC
9218   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
9219                        AANoUndef::StateType &State) {
9220     const Value *UseV = U->get();
9221     const DominatorTree *DT = nullptr;
9222     AssumptionCache *AC = nullptr;
9223     InformationCache &InfoCache = A.getInfoCache();
9224     if (Function *F = getAnchorScope()) {
9225       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
9226       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
9227     }
9228     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
9229     bool TrackUse = false;
9230     // Track use for instructions which must produce undef or poison bits when
9231     // at least one operand contains such bits.
9232     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
9233       TrackUse = true;
9234     return TrackUse;
9235   }
9236 
9237   /// See AbstractAttribute::getAsStr().
9238   const std::string getAsStr() const override {
9239     return getAssumed() ? "noundef" : "may-undef-or-poison";
9240   }
9241 
9242   ChangeStatus manifest(Attributor &A) override {
9243     // We don't manifest noundef attribute for dead positions because the
9244     // associated values with dead positions would be replaced with undef
9245     // values.
9246     bool UsedAssumedInformation = false;
9247     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr,
9248                         UsedAssumedInformation))
9249       return ChangeStatus::UNCHANGED;
9250     // A position whose simplified value does not have any value is
9251     // considered to be dead. We don't manifest noundef in such positions for
9252     // the same reason above.
9253     if (!A.getAssumedSimplified(getIRPosition(), *this, UsedAssumedInformation)
9254              .hasValue())
9255       return ChangeStatus::UNCHANGED;
9256     return AANoUndef::manifest(A);
9257   }
9258 };
9259 
9260 struct AANoUndefFloating : public AANoUndefImpl {
9261   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
9262       : AANoUndefImpl(IRP, A) {}
9263 
9264   /// See AbstractAttribute::initialize(...).
9265   void initialize(Attributor &A) override {
9266     AANoUndefImpl::initialize(A);
9267     if (!getState().isAtFixpoint())
9268       if (Instruction *CtxI = getCtxI())
9269         followUsesInMBEC(*this, A, getState(), *CtxI);
9270   }
9271 
9272   /// See AbstractAttribute::updateImpl(...).
9273   ChangeStatus updateImpl(Attributor &A) override {
9274     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
9275                             AANoUndef::StateType &T, bool Stripped) -> bool {
9276       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
9277                                              DepClassTy::REQUIRED);
9278       if (!Stripped && this == &AA) {
9279         T.indicatePessimisticFixpoint();
9280       } else {
9281         const AANoUndef::StateType &S =
9282             static_cast<const AANoUndef::StateType &>(AA.getState());
9283         T ^= S;
9284       }
9285       return T.isValidState();
9286     };
9287 
9288     StateType T;
9289     if (!genericValueTraversal<StateType>(A, getIRPosition(), *this, T,
9290                                           VisitValueCB, getCtxI()))
9291       return indicatePessimisticFixpoint();
9292 
9293     return clampStateAndIndicateChange(getState(), T);
9294   }
9295 
9296   /// See AbstractAttribute::trackStatistics()
9297   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9298 };
9299 
9300 struct AANoUndefReturned final
9301     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
9302   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
9303       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
9304 
9305   /// See AbstractAttribute::trackStatistics()
9306   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
9307 };
9308 
9309 struct AANoUndefArgument final
9310     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
9311   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
9312       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
9313 
9314   /// See AbstractAttribute::trackStatistics()
9315   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
9316 };
9317 
9318 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
9319   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
9320       : AANoUndefFloating(IRP, A) {}
9321 
9322   /// See AbstractAttribute::trackStatistics()
9323   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
9324 };
9325 
9326 struct AANoUndefCallSiteReturned final
9327     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
9328   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
9329       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
9330 
9331   /// See AbstractAttribute::trackStatistics()
9332   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
9333 };
9334 
9335 struct AACallEdgesImpl : public AACallEdges {
9336   AACallEdgesImpl(const IRPosition &IRP, Attributor &A) : AACallEdges(IRP, A) {}
9337 
9338   virtual const SetVector<Function *> &getOptimisticEdges() const override {
9339     return CalledFunctions;
9340   }
9341 
9342   virtual bool hasUnknownCallee() const override { return HasUnknownCallee; }
9343 
9344   virtual bool hasNonAsmUnknownCallee() const override {
9345     return HasUnknownCalleeNonAsm;
9346   }
9347 
9348   const std::string getAsStr() const override {
9349     return "CallEdges[" + std::to_string(HasUnknownCallee) + "," +
9350            std::to_string(CalledFunctions.size()) + "]";
9351   }
9352 
9353   void trackStatistics() const override {}
9354 
9355 protected:
9356   void addCalledFunction(Function *Fn, ChangeStatus &Change) {
9357     if (CalledFunctions.insert(Fn)) {
9358       Change = ChangeStatus::CHANGED;
9359       LLVM_DEBUG(dbgs() << "[AACallEdges] New call edge: " << Fn->getName()
9360                         << "\n");
9361     }
9362   }
9363 
9364   void setHasUnknownCallee(bool NonAsm, ChangeStatus &Change) {
9365     if (!HasUnknownCallee)
9366       Change = ChangeStatus::CHANGED;
9367     if (NonAsm && !HasUnknownCalleeNonAsm)
9368       Change = ChangeStatus::CHANGED;
9369     HasUnknownCalleeNonAsm |= NonAsm;
9370     HasUnknownCallee = true;
9371   }
9372 
9373 private:
9374   /// Optimistic set of functions that might be called by this position.
9375   SetVector<Function *> CalledFunctions;
9376 
9377   /// Is there any call with a unknown callee.
9378   bool HasUnknownCallee = false;
9379 
9380   /// Is there any call with a unknown callee, excluding any inline asm.
9381   bool HasUnknownCalleeNonAsm = false;
9382 };
9383 
9384 struct AACallEdgesCallSite : public AACallEdgesImpl {
9385   AACallEdgesCallSite(const IRPosition &IRP, Attributor &A)
9386       : AACallEdgesImpl(IRP, A) {}
9387   /// See AbstractAttribute::updateImpl(...).
9388   ChangeStatus updateImpl(Attributor &A) override {
9389     ChangeStatus Change = ChangeStatus::UNCHANGED;
9390 
9391     auto VisitValue = [&](Value &V, const Instruction *CtxI, bool &HasUnknown,
9392                           bool Stripped) -> bool {
9393       if (Function *Fn = dyn_cast<Function>(&V)) {
9394         addCalledFunction(Fn, Change);
9395       } else {
9396         LLVM_DEBUG(dbgs() << "[AACallEdges] Unrecognized value: " << V << "\n");
9397         setHasUnknownCallee(true, Change);
9398       }
9399 
9400       // Explore all values.
9401       return true;
9402     };
9403 
9404     // Process any value that we might call.
9405     auto ProcessCalledOperand = [&](Value *V) {
9406       bool DummyValue = false;
9407       if (!genericValueTraversal<bool>(A, IRPosition::value(*V), *this,
9408                                        DummyValue, VisitValue, nullptr,
9409                                        false)) {
9410         // If we haven't gone through all values, assume that there are unknown
9411         // callees.
9412         setHasUnknownCallee(true, Change);
9413       }
9414     };
9415 
9416     CallBase *CB = static_cast<CallBase *>(getCtxI());
9417 
9418     if (CB->isInlineAsm()) {
9419       setHasUnknownCallee(false, Change);
9420       return Change;
9421     }
9422 
9423     // Process callee metadata if available.
9424     if (auto *MD = getCtxI()->getMetadata(LLVMContext::MD_callees)) {
9425       for (auto &Op : MD->operands()) {
9426         Function *Callee = mdconst::dyn_extract_or_null<Function>(Op);
9427         if (Callee)
9428           addCalledFunction(Callee, Change);
9429       }
9430       return Change;
9431     }
9432 
9433     // The most simple case.
9434     ProcessCalledOperand(CB->getCalledOperand());
9435 
9436     // Process callback functions.
9437     SmallVector<const Use *, 4u> CallbackUses;
9438     AbstractCallSite::getCallbackUses(*CB, CallbackUses);
9439     for (const Use *U : CallbackUses)
9440       ProcessCalledOperand(U->get());
9441 
9442     return Change;
9443   }
9444 };
9445 
9446 struct AACallEdgesFunction : public AACallEdgesImpl {
9447   AACallEdgesFunction(const IRPosition &IRP, Attributor &A)
9448       : AACallEdgesImpl(IRP, A) {}
9449 
9450   /// See AbstractAttribute::updateImpl(...).
9451   ChangeStatus updateImpl(Attributor &A) override {
9452     ChangeStatus Change = ChangeStatus::UNCHANGED;
9453 
9454     auto ProcessCallInst = [&](Instruction &Inst) {
9455       CallBase &CB = static_cast<CallBase &>(Inst);
9456 
9457       auto &CBEdges = A.getAAFor<AACallEdges>(
9458           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9459       if (CBEdges.hasNonAsmUnknownCallee())
9460         setHasUnknownCallee(true, Change);
9461       if (CBEdges.hasUnknownCallee())
9462         setHasUnknownCallee(false, Change);
9463 
9464       for (Function *F : CBEdges.getOptimisticEdges())
9465         addCalledFunction(F, Change);
9466 
9467       return true;
9468     };
9469 
9470     // Visit all callable instructions.
9471     bool UsedAssumedInformation = false;
9472     if (!A.checkForAllCallLikeInstructions(ProcessCallInst, *this,
9473                                            UsedAssumedInformation)) {
9474       // If we haven't looked at all call like instructions, assume that there
9475       // are unknown callees.
9476       setHasUnknownCallee(true, Change);
9477     }
9478 
9479     return Change;
9480   }
9481 };
9482 
9483 struct AAFunctionReachabilityFunction : public AAFunctionReachability {
9484 private:
9485   struct QuerySet {
9486     void markReachable(Function *Fn) {
9487       Reachable.insert(Fn);
9488       Unreachable.erase(Fn);
9489     }
9490 
9491     ChangeStatus update(Attributor &A, const AAFunctionReachability &AA,
9492                         ArrayRef<const AACallEdges *> AAEdgesList) {
9493       ChangeStatus Change = ChangeStatus::UNCHANGED;
9494 
9495       for (auto *AAEdges : AAEdgesList) {
9496         if (AAEdges->hasUnknownCallee()) {
9497           if (!CanReachUnknownCallee)
9498             Change = ChangeStatus::CHANGED;
9499           CanReachUnknownCallee = true;
9500           return Change;
9501         }
9502       }
9503 
9504       for (Function *Fn : make_early_inc_range(Unreachable)) {
9505         if (checkIfReachable(A, AA, AAEdgesList, Fn)) {
9506           Change = ChangeStatus::CHANGED;
9507           markReachable(Fn);
9508         }
9509       }
9510       return Change;
9511     }
9512 
9513     bool isReachable(Attributor &A, const AAFunctionReachability &AA,
9514                      ArrayRef<const AACallEdges *> AAEdgesList, Function *Fn) {
9515       // Assume that we can reach the function.
9516       // TODO: Be more specific with the unknown callee.
9517       if (CanReachUnknownCallee)
9518         return true;
9519 
9520       if (Reachable.count(Fn))
9521         return true;
9522 
9523       if (Unreachable.count(Fn))
9524         return false;
9525 
9526       // We need to assume that this function can't reach Fn to prevent
9527       // an infinite loop if this function is recursive.
9528       Unreachable.insert(Fn);
9529 
9530       bool Result = checkIfReachable(A, AA, AAEdgesList, Fn);
9531       if (Result)
9532         markReachable(Fn);
9533       return Result;
9534     }
9535 
9536     bool checkIfReachable(Attributor &A, const AAFunctionReachability &AA,
9537                           ArrayRef<const AACallEdges *> AAEdgesList,
9538                           Function *Fn) const {
9539 
9540       // Handle the most trivial case first.
9541       for (auto *AAEdges : AAEdgesList) {
9542         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9543 
9544         if (Edges.count(Fn))
9545           return true;
9546       }
9547 
9548       SmallVector<const AAFunctionReachability *, 8> Deps;
9549       for (auto &AAEdges : AAEdgesList) {
9550         const SetVector<Function *> &Edges = AAEdges->getOptimisticEdges();
9551 
9552         for (Function *Edge : Edges) {
9553           // We don't need a dependency if the result is reachable.
9554           const AAFunctionReachability &EdgeReachability =
9555               A.getAAFor<AAFunctionReachability>(
9556                   AA, IRPosition::function(*Edge), DepClassTy::NONE);
9557           Deps.push_back(&EdgeReachability);
9558 
9559           if (EdgeReachability.canReach(A, Fn))
9560             return true;
9561         }
9562       }
9563 
9564       // The result is false for now, set dependencies and leave.
9565       for (auto Dep : Deps)
9566         A.recordDependence(AA, *Dep, DepClassTy::REQUIRED);
9567 
9568       return false;
9569     }
9570 
9571     /// Set of functions that we know for sure is reachable.
9572     DenseSet<Function *> Reachable;
9573 
9574     /// Set of functions that are unreachable, but might become reachable.
9575     DenseSet<Function *> Unreachable;
9576 
9577     /// If we can reach a function with a call to a unknown function we assume
9578     /// that we can reach any function.
9579     bool CanReachUnknownCallee = false;
9580   };
9581 
9582 public:
9583   AAFunctionReachabilityFunction(const IRPosition &IRP, Attributor &A)
9584       : AAFunctionReachability(IRP, A) {}
9585 
9586   bool canReach(Attributor &A, Function *Fn) const override {
9587     const AACallEdges &AAEdges =
9588         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9589 
9590     // Attributor returns attributes as const, so this function has to be
9591     // const for users of this attribute to use it without having to do
9592     // a const_cast.
9593     // This is a hack for us to be able to cache queries.
9594     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9595     bool Result =
9596         NonConstThis->WholeFunction.isReachable(A, *this, {&AAEdges}, Fn);
9597 
9598     return Result;
9599   }
9600 
9601   /// Can \p CB reach \p Fn
9602   bool canReach(Attributor &A, CallBase &CB, Function *Fn) const override {
9603     const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9604         *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
9605 
9606     // Attributor returns attributes as const, so this function has to be
9607     // const for users of this attribute to use it without having to do
9608     // a const_cast.
9609     // This is a hack for us to be able to cache queries.
9610     auto *NonConstThis = const_cast<AAFunctionReachabilityFunction *>(this);
9611     QuerySet &CBQuery = NonConstThis->CBQueries[&CB];
9612 
9613     bool Result = CBQuery.isReachable(A, *this, {&AAEdges}, Fn);
9614 
9615     return Result;
9616   }
9617 
9618   /// See AbstractAttribute::updateImpl(...).
9619   ChangeStatus updateImpl(Attributor &A) override {
9620     const AACallEdges &AAEdges =
9621         A.getAAFor<AACallEdges>(*this, getIRPosition(), DepClassTy::REQUIRED);
9622     ChangeStatus Change = ChangeStatus::UNCHANGED;
9623 
9624     Change |= WholeFunction.update(A, *this, {&AAEdges});
9625 
9626     for (auto CBPair : CBQueries) {
9627       const AACallEdges &AAEdges = A.getAAFor<AACallEdges>(
9628           *this, IRPosition::callsite_function(*CBPair.first),
9629           DepClassTy::REQUIRED);
9630 
9631       Change |= CBPair.second.update(A, *this, {&AAEdges});
9632     }
9633 
9634     return Change;
9635   }
9636 
9637   const std::string getAsStr() const override {
9638     size_t QueryCount =
9639         WholeFunction.Reachable.size() + WholeFunction.Unreachable.size();
9640 
9641     return "FunctionReachability [" +
9642            std::to_string(WholeFunction.Reachable.size()) + "," +
9643            std::to_string(QueryCount) + "]";
9644   }
9645 
9646   void trackStatistics() const override {}
9647 private:
9648   bool canReachUnknownCallee() const override {
9649     return WholeFunction.CanReachUnknownCallee;
9650   }
9651 
9652   /// Used to answer if a the whole function can reacha a specific function.
9653   QuerySet WholeFunction;
9654 
9655   /// Used to answer if a call base inside this function can reach a specific
9656   /// function.
9657   DenseMap<CallBase *, QuerySet> CBQueries;
9658 };
9659 
9660 } // namespace
9661 
9662 AACallGraphNode *AACallEdgeIterator::operator*() const {
9663   return static_cast<AACallGraphNode *>(const_cast<AACallEdges *>(
9664       &A.getOrCreateAAFor<AACallEdges>(IRPosition::function(**I))));
9665 }
9666 
9667 void AttributorCallGraph::print() { llvm::WriteGraph(outs(), this); }
9668 
9669 const char AAReturnedValues::ID = 0;
9670 const char AANoUnwind::ID = 0;
9671 const char AANoSync::ID = 0;
9672 const char AANoFree::ID = 0;
9673 const char AANonNull::ID = 0;
9674 const char AANoRecurse::ID = 0;
9675 const char AAWillReturn::ID = 0;
9676 const char AAUndefinedBehavior::ID = 0;
9677 const char AANoAlias::ID = 0;
9678 const char AAReachability::ID = 0;
9679 const char AANoReturn::ID = 0;
9680 const char AAIsDead::ID = 0;
9681 const char AADereferenceable::ID = 0;
9682 const char AAAlign::ID = 0;
9683 const char AANoCapture::ID = 0;
9684 const char AAValueSimplify::ID = 0;
9685 const char AAHeapToStack::ID = 0;
9686 const char AAPrivatizablePtr::ID = 0;
9687 const char AAMemoryBehavior::ID = 0;
9688 const char AAMemoryLocation::ID = 0;
9689 const char AAValueConstantRange::ID = 0;
9690 const char AAPotentialValues::ID = 0;
9691 const char AANoUndef::ID = 0;
9692 const char AACallEdges::ID = 0;
9693 const char AAFunctionReachability::ID = 0;
9694 const char AAPointerInfo::ID = 0;
9695 
9696 // Macro magic to create the static generator function for attributes that
9697 // follow the naming scheme.
9698 
9699 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
9700   case IRPosition::PK:                                                         \
9701     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
9702 
9703 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
9704   case IRPosition::PK:                                                         \
9705     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
9706     ++NumAAs;                                                                  \
9707     break;
9708 
9709 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
9710   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9711     CLASS *AA = nullptr;                                                       \
9712     switch (IRP.getPositionKind()) {                                           \
9713       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9714       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9715       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9716       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9717       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9718       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9719       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9720       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9721     }                                                                          \
9722     return *AA;                                                                \
9723   }
9724 
9725 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
9726   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9727     CLASS *AA = nullptr;                                                       \
9728     switch (IRP.getPositionKind()) {                                           \
9729       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9730       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
9731       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9732       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9733       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9734       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9735       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9736       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9737     }                                                                          \
9738     return *AA;                                                                \
9739   }
9740 
9741 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
9742   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9743     CLASS *AA = nullptr;                                                       \
9744     switch (IRP.getPositionKind()) {                                           \
9745       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9746       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9747       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9748       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9749       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9750       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
9751       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9752       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9753     }                                                                          \
9754     return *AA;                                                                \
9755   }
9756 
9757 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
9758   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9759     CLASS *AA = nullptr;                                                       \
9760     switch (IRP.getPositionKind()) {                                           \
9761       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9762       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
9763       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
9764       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9765       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
9766       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
9767       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
9768       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9769     }                                                                          \
9770     return *AA;                                                                \
9771   }
9772 
9773 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
9774   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
9775     CLASS *AA = nullptr;                                                       \
9776     switch (IRP.getPositionKind()) {                                           \
9777       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
9778       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
9779       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
9780       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
9781       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
9782       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
9783       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
9784       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
9785     }                                                                          \
9786     return *AA;                                                                \
9787   }
9788 
9789 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
9790 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
9791 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
9792 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
9793 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
9794 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
9795 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
9796 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AACallEdges)
9797 
9798 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
9799 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
9800 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
9801 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
9802 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
9803 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
9804 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
9805 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
9806 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
9807 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPointerInfo)
9808 
9809 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
9810 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
9811 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
9812 
9813 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
9814 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
9815 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
9816 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAFunctionReachability)
9817 
9818 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
9819 
9820 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
9821 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
9822 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
9823 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
9824 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
9825 #undef SWITCH_PK_CREATE
9826 #undef SWITCH_PK_INV
9827