1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 
36 #include <cassert>
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "attributor"
41 
42 static cl::opt<bool> ManifestInternal(
43     "attributor-manifest-internal", cl::Hidden,
44     cl::desc("Manifest Attributor internal string attributes."),
45     cl::init(false));
46 
47 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
48                                        cl::Hidden);
49 
50 template <>
51 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
52 
53 static cl::opt<unsigned, true> MaxPotentialValues(
54     "attributor-max-potential-values", cl::Hidden,
55     cl::desc("Maximum number of potential values to be "
56              "tracked for each position."),
57     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
58     cl::init(7));
59 
60 STATISTIC(NumAAs, "Number of abstract attributes created");
61 
62 // Some helper macros to deal with statistics tracking.
63 //
64 // Usage:
65 // For simple IR attribute tracking overload trackStatistics in the abstract
66 // attribute and choose the right STATS_DECLTRACK_********* macro,
67 // e.g.,:
68 //  void trackStatistics() const override {
69 //    STATS_DECLTRACK_ARG_ATTR(returned)
70 //  }
71 // If there is a single "increment" side one can use the macro
72 // STATS_DECLTRACK with a custom message. If there are multiple increment
73 // sides, STATS_DECL and STATS_TRACK can also be used separately.
74 //
75 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
76   ("Number of " #TYPE " marked '" #NAME "'")
77 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
78 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
79 #define STATS_DECL(NAME, TYPE, MSG)                                            \
80   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
81 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
82 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
83   {                                                                            \
84     STATS_DECL(NAME, TYPE, MSG)                                                \
85     STATS_TRACK(NAME, TYPE)                                                    \
86   }
87 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
88   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
89 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
90   STATS_DECLTRACK(NAME, CSArguments,                                           \
91                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
92 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
94 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
95   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
96 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
97   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
98                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
99 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
100   STATS_DECLTRACK(NAME, CSReturn,                                              \
101                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
102 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
103   STATS_DECLTRACK(NAME, Floating,                                              \
104                   ("Number of floating values known to be '" #NAME "'"))
105 
106 // Specialization of the operator<< for abstract attributes subclasses. This
107 // disambiguates situations where multiple operators are applicable.
108 namespace llvm {
109 #define PIPE_OPERATOR(CLASS)                                                   \
110   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
111     return OS << static_cast<const AbstractAttribute &>(AA);                   \
112   }
113 
114 PIPE_OPERATOR(AAIsDead)
115 PIPE_OPERATOR(AANoUnwind)
116 PIPE_OPERATOR(AANoSync)
117 PIPE_OPERATOR(AANoRecurse)
118 PIPE_OPERATOR(AAWillReturn)
119 PIPE_OPERATOR(AANoReturn)
120 PIPE_OPERATOR(AAReturnedValues)
121 PIPE_OPERATOR(AANonNull)
122 PIPE_OPERATOR(AANoAlias)
123 PIPE_OPERATOR(AADereferenceable)
124 PIPE_OPERATOR(AAAlign)
125 PIPE_OPERATOR(AANoCapture)
126 PIPE_OPERATOR(AAValueSimplify)
127 PIPE_OPERATOR(AANoFree)
128 PIPE_OPERATOR(AAHeapToStack)
129 PIPE_OPERATOR(AAReachability)
130 PIPE_OPERATOR(AAMemoryBehavior)
131 PIPE_OPERATOR(AAMemoryLocation)
132 PIPE_OPERATOR(AAValueConstantRange)
133 PIPE_OPERATOR(AAPrivatizablePtr)
134 PIPE_OPERATOR(AAUndefinedBehavior)
135 PIPE_OPERATOR(AAPotentialValues)
136 PIPE_OPERATOR(AANoUndef)
137 
138 #undef PIPE_OPERATOR
139 } // namespace llvm
140 
141 namespace {
142 
143 static Optional<ConstantInt *>
144 getAssumedConstantInt(Attributor &A, const Value &V,
145                       const AbstractAttribute &AA,
146                       bool &UsedAssumedInformation) {
147   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
148   if (C.hasValue())
149     return dyn_cast_or_null<ConstantInt>(C.getValue());
150   return llvm::None;
151 }
152 
153 /// Get pointer operand of memory accessing instruction. If \p I is
154 /// not a memory accessing instruction, return nullptr. If \p AllowVolatile,
155 /// is set to false and the instruction is volatile, return nullptr.
156 static const Value *getPointerOperand(const Instruction *I,
157                                       bool AllowVolatile) {
158   if (auto *LI = dyn_cast<LoadInst>(I)) {
159     if (!AllowVolatile && LI->isVolatile())
160       return nullptr;
161     return LI->getPointerOperand();
162   }
163 
164   if (auto *SI = dyn_cast<StoreInst>(I)) {
165     if (!AllowVolatile && SI->isVolatile())
166       return nullptr;
167     return SI->getPointerOperand();
168   }
169 
170   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
171     if (!AllowVolatile && CXI->isVolatile())
172       return nullptr;
173     return CXI->getPointerOperand();
174   }
175 
176   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
177     if (!AllowVolatile && RMWI->isVolatile())
178       return nullptr;
179     return RMWI->getPointerOperand();
180   }
181 
182   return nullptr;
183 }
184 
185 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
186 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
187 /// getelement pointer instructions that traverse the natural type of \p Ptr if
188 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
189 /// through a cast to i8*.
190 ///
191 /// TODO: This could probably live somewhere more prominantly if it doesn't
192 ///       already exist.
193 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
194                                int64_t Offset, IRBuilder<NoFolder> &IRB,
195                                const DataLayout &DL) {
196   assert(Offset >= 0 && "Negative offset not supported yet!");
197   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
198                     << "-bytes as " << *ResTy << "\n");
199 
200   if (Offset) {
201     SmallVector<Value *, 4> Indices;
202     std::string GEPName = Ptr->getName().str() + ".0";
203 
204     // Add 0 index to look through the pointer.
205     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
206            "Offset out of bounds");
207     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
208 
209     Type *Ty = PtrElemTy;
210     do {
211       auto *STy = dyn_cast<StructType>(Ty);
212       if (!STy)
213         // Non-aggregate type, we cast and make byte-wise progress now.
214         break;
215 
216       const StructLayout *SL = DL.getStructLayout(STy);
217       if (int64_t(SL->getSizeInBytes()) < Offset)
218         break;
219 
220       uint64_t Idx = SL->getElementContainingOffset(Offset);
221       assert(Idx < STy->getNumElements() && "Offset calculation error!");
222       uint64_t Rem = Offset - SL->getElementOffset(Idx);
223       Ty = STy->getElementType(Idx);
224 
225       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
226                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
227 
228       GEPName += "." + std::to_string(Idx);
229       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
230       Offset = Rem;
231     } while (Offset);
232 
233     // Create a GEP for the indices collected above.
234     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
235 
236     // If an offset is left we use byte-wise adjustment.
237     if (Offset) {
238       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
239       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
240                           GEPName + ".b" + Twine(Offset));
241     }
242   }
243 
244   // Ensure the result has the requested type.
245   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
246 
247   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
248   return Ptr;
249 }
250 
251 /// Recursively visit all values that might become \p IRP at some point. This
252 /// will be done by looking through cast instructions, selects, phis, and calls
253 /// with the "returned" attribute. Once we cannot look through the value any
254 /// further, the callback \p VisitValueCB is invoked and passed the current
255 /// value, the \p State, and a flag to indicate if we stripped anything.
256 /// Stripped means that we unpacked the value associated with \p IRP at least
257 /// once. Note that the value used for the callback may still be the value
258 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
259 /// we will never visit more values than specified by \p MaxValues.
260 template <typename AAType, typename StateTy>
261 static bool genericValueTraversal(
262     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
263     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
264         VisitValueCB,
265     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
266     function_ref<Value *(Value *)> StripCB = nullptr) {
267 
268   const AAIsDead *LivenessAA = nullptr;
269   if (IRP.getAnchorScope())
270     LivenessAA = &A.getAAFor<AAIsDead>(
271         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
272         DepClassTy::NONE);
273   bool AnyDead = false;
274 
275   using Item = std::pair<Value *, const Instruction *>;
276   SmallSet<Item, 16> Visited;
277   SmallVector<Item, 16> Worklist;
278   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
279 
280   int Iteration = 0;
281   do {
282     Item I = Worklist.pop_back_val();
283     Value *V = I.first;
284     CtxI = I.second;
285     if (StripCB)
286       V = StripCB(V);
287 
288     // Check if we should process the current value. To prevent endless
289     // recursion keep a record of the values we followed!
290     if (!Visited.insert(I).second)
291       continue;
292 
293     // Make sure we limit the compile time for complex expressions.
294     if (Iteration++ >= MaxValues)
295       return false;
296 
297     // Explicitly look through calls with a "returned" attribute if we do
298     // not have a pointer as stripPointerCasts only works on them.
299     Value *NewV = nullptr;
300     if (V->getType()->isPointerTy()) {
301       NewV = V->stripPointerCasts();
302     } else {
303       auto *CB = dyn_cast<CallBase>(V);
304       if (CB && CB->getCalledFunction()) {
305         for (Argument &Arg : CB->getCalledFunction()->args())
306           if (Arg.hasReturnedAttr()) {
307             NewV = CB->getArgOperand(Arg.getArgNo());
308             break;
309           }
310       }
311     }
312     if (NewV && NewV != V) {
313       Worklist.push_back({NewV, CtxI});
314       continue;
315     }
316 
317     // Look through select instructions, visit both potential values.
318     if (auto *SI = dyn_cast<SelectInst>(V)) {
319       Worklist.push_back({SI->getTrueValue(), CtxI});
320       Worklist.push_back({SI->getFalseValue(), CtxI});
321       continue;
322     }
323 
324     // Look through phi nodes, visit all live operands.
325     if (auto *PHI = dyn_cast<PHINode>(V)) {
326       assert(LivenessAA &&
327              "Expected liveness in the presence of instructions!");
328       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
329         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
330         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
331                             LivenessAA,
332                             /* CheckBBLivenessOnly */ true)) {
333           AnyDead = true;
334           continue;
335         }
336         Worklist.push_back(
337             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
338       }
339       continue;
340     }
341 
342     if (UseValueSimplify && !isa<Constant>(V)) {
343       bool UsedAssumedInformation = false;
344       Optional<Constant *> C =
345           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
346       if (!C.hasValue())
347         continue;
348       if (Value *NewV = C.getValue()) {
349         Worklist.push_back({NewV, CtxI});
350         continue;
351       }
352     }
353 
354     // Once a leaf is reached we inform the user through the callback.
355     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
356       return false;
357   } while (!Worklist.empty());
358 
359   // If we actually used liveness information so we have to record a dependence.
360   if (AnyDead)
361     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
362 
363   // All values have been visited.
364   return true;
365 }
366 
367 const Value *stripAndAccumulateMinimalOffsets(
368     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
369     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
370     bool UseAssumed = false) {
371 
372   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
373     const IRPosition &Pos = IRPosition::value(V);
374     // Only track dependence if we are going to use the assumed info.
375     const AAValueConstantRange &ValueConstantRangeAA =
376         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
377                                          UseAssumed ? DepClassTy::OPTIONAL
378                                                     : DepClassTy::NONE);
379     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
380                                      : ValueConstantRangeAA.getKnown();
381     // We can only use the lower part of the range because the upper part can
382     // be higher than what the value can really be.
383     ROffset = Range.getSignedMin();
384     return true;
385   };
386 
387   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
388                                                 AttributorAnalysis);
389 }
390 
391 static const Value *getMinimalBaseOfAccsesPointerOperand(
392     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
393     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
394   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
395   if (!Ptr)
396     return nullptr;
397   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
398   const Value *Base = stripAndAccumulateMinimalOffsets(
399       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
400 
401   BytesOffset = OffsetAPInt.getSExtValue();
402   return Base;
403 }
404 
405 static const Value *
406 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
407                                      const DataLayout &DL,
408                                      bool AllowNonInbounds = false) {
409   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
410   if (!Ptr)
411     return nullptr;
412 
413   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
414                                           AllowNonInbounds);
415 }
416 
417 /// Helper function to clamp a state \p S of type \p StateType with the
418 /// information in \p R and indicate/return if \p S did change (as-in update is
419 /// required to be run again).
420 template <typename StateType>
421 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
422   auto Assumed = S.getAssumed();
423   S ^= R;
424   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
425                                    : ChangeStatus::CHANGED;
426 }
427 
428 /// Clamp the information known for all returned values of a function
429 /// (identified by \p QueryingAA) into \p S.
430 template <typename AAType, typename StateType = typename AAType::StateType>
431 static void clampReturnedValueStates(
432     Attributor &A, const AAType &QueryingAA, StateType &S,
433     const IRPosition::CallBaseContext *CBContext = nullptr) {
434   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
435                     << QueryingAA << " into " << S << "\n");
436 
437   assert((QueryingAA.getIRPosition().getPositionKind() ==
438               IRPosition::IRP_RETURNED ||
439           QueryingAA.getIRPosition().getPositionKind() ==
440               IRPosition::IRP_CALL_SITE_RETURNED) &&
441          "Can only clamp returned value states for a function returned or call "
442          "site returned position!");
443 
444   // Use an optional state as there might not be any return values and we want
445   // to join (IntegerState::operator&) the state of all there are.
446   Optional<StateType> T;
447 
448   // Callback for each possibly returned value.
449   auto CheckReturnValue = [&](Value &RV) -> bool {
450     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
451     const AAType &AA =
452         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
453     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
454                       << " @ " << RVPos << "\n");
455     const StateType &AAS = AA.getState();
456     if (T.hasValue())
457       *T &= AAS;
458     else
459       T = AAS;
460     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
461                       << "\n");
462     return T->isValidState();
463   };
464 
465   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
466     S.indicatePessimisticFixpoint();
467   else if (T.hasValue())
468     S ^= *T;
469 }
470 
471 /// Helper class for generic deduction: return value -> returned position.
472 template <typename AAType, typename BaseType,
473           typename StateType = typename BaseType::StateType,
474           bool PropagateCallBaseContext = false>
475 struct AAReturnedFromReturnedValues : public BaseType {
476   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
477       : BaseType(IRP, A) {}
478 
479   /// See AbstractAttribute::updateImpl(...).
480   ChangeStatus updateImpl(Attributor &A) override {
481     StateType S(StateType::getBestState(this->getState()));
482     clampReturnedValueStates<AAType, StateType>(
483         A, *this, S,
484         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
485     // TODO: If we know we visited all returned values, thus no are assumed
486     // dead, we can take the known information from the state T.
487     return clampStateAndIndicateChange<StateType>(this->getState(), S);
488   }
489 };
490 
491 /// Clamp the information known at all call sites for a given argument
492 /// (identified by \p QueryingAA) into \p S.
493 template <typename AAType, typename StateType = typename AAType::StateType>
494 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
495                                         StateType &S) {
496   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
497                     << QueryingAA << " into " << S << "\n");
498 
499   assert(QueryingAA.getIRPosition().getPositionKind() ==
500              IRPosition::IRP_ARGUMENT &&
501          "Can only clamp call site argument states for an argument position!");
502 
503   // Use an optional state as there might not be any return values and we want
504   // to join (IntegerState::operator&) the state of all there are.
505   Optional<StateType> T;
506 
507   // The argument number which is also the call site argument number.
508   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
509 
510   auto CallSiteCheck = [&](AbstractCallSite ACS) {
511     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
512     // Check if a coresponding argument was found or if it is on not associated
513     // (which can happen for callback calls).
514     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
515       return false;
516 
517     const AAType &AA =
518         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
519     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
520                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
521     const StateType &AAS = AA.getState();
522     if (T.hasValue())
523       *T &= AAS;
524     else
525       T = AAS;
526     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
527                       << "\n");
528     return T->isValidState();
529   };
530 
531   bool AllCallSitesKnown;
532   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
533                               AllCallSitesKnown))
534     S.indicatePessimisticFixpoint();
535   else if (T.hasValue())
536     S ^= *T;
537 }
538 
539 /// This function is the bridge between argument position and the call base
540 /// context.
541 template <typename AAType, typename BaseType,
542           typename StateType = typename AAType::StateType>
543 bool getArgumentStateFromCallBaseContext(Attributor &A,
544                                          BaseType &QueryingAttribute,
545                                          IRPosition &Pos, StateType &State) {
546   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
547          "Expected an 'argument' position !");
548   const CallBase *CBContext = Pos.getCallBaseContext();
549   if (!CBContext)
550     return false;
551 
552   int ArgNo = Pos.getCallSiteArgNo();
553   assert(ArgNo >= 0 && "Invalid Arg No!");
554 
555   const auto &AA = A.getAAFor<AAType>(
556       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
557       DepClassTy::REQUIRED);
558   const StateType &CBArgumentState =
559       static_cast<const StateType &>(AA.getState());
560 
561   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
562                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
563                     << "\n");
564 
565   // NOTE: If we want to do call site grouping it should happen here.
566   State ^= CBArgumentState;
567   return true;
568 }
569 
570 /// Helper class for generic deduction: call site argument -> argument position.
571 template <typename AAType, typename BaseType,
572           typename StateType = typename AAType::StateType,
573           bool BridgeCallBaseContext = false>
574 struct AAArgumentFromCallSiteArguments : public BaseType {
575   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
576       : BaseType(IRP, A) {}
577 
578   /// See AbstractAttribute::updateImpl(...).
579   ChangeStatus updateImpl(Attributor &A) override {
580     StateType S = StateType::getBestState(this->getState());
581 
582     if (BridgeCallBaseContext) {
583       bool Success =
584           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
585               A, *this, this->getIRPosition(), S);
586       if (Success)
587         return clampStateAndIndicateChange<StateType>(this->getState(), S);
588     }
589     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
590 
591     // TODO: If we know we visited all incoming values, thus no are assumed
592     // dead, we can take the known information from the state T.
593     return clampStateAndIndicateChange<StateType>(this->getState(), S);
594   }
595 };
596 
597 /// Helper class for generic replication: function returned -> cs returned.
598 template <typename AAType, typename BaseType,
599           typename StateType = typename BaseType::StateType,
600           bool IntroduceCallBaseContext = false>
601 struct AACallSiteReturnedFromReturned : public BaseType {
602   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
603       : BaseType(IRP, A) {}
604 
605   /// See AbstractAttribute::updateImpl(...).
606   ChangeStatus updateImpl(Attributor &A) override {
607     assert(this->getIRPosition().getPositionKind() ==
608                IRPosition::IRP_CALL_SITE_RETURNED &&
609            "Can only wrap function returned positions for call site returned "
610            "positions!");
611     auto &S = this->getState();
612 
613     const Function *AssociatedFunction =
614         this->getIRPosition().getAssociatedFunction();
615     if (!AssociatedFunction)
616       return S.indicatePessimisticFixpoint();
617 
618     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
619     if (IntroduceCallBaseContext)
620       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
621                         << CBContext << "\n");
622 
623     IRPosition FnPos = IRPosition::returned(
624         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
625     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
626     return clampStateAndIndicateChange(S, AA.getState());
627   }
628 };
629 
630 /// Helper function to accumulate uses.
631 template <class AAType, typename StateType = typename AAType::StateType>
632 static void followUsesInContext(AAType &AA, Attributor &A,
633                                 MustBeExecutedContextExplorer &Explorer,
634                                 const Instruction *CtxI,
635                                 SetVector<const Use *> &Uses,
636                                 StateType &State) {
637   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
638   for (unsigned u = 0; u < Uses.size(); ++u) {
639     const Use *U = Uses[u];
640     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
641       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
642       if (Found && AA.followUseInMBEC(A, U, UserI, State))
643         for (const Use &Us : UserI->uses())
644           Uses.insert(&Us);
645     }
646   }
647 }
648 
649 /// Use the must-be-executed-context around \p I to add information into \p S.
650 /// The AAType class is required to have `followUseInMBEC` method with the
651 /// following signature and behaviour:
652 ///
653 /// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
654 /// U - Underlying use.
655 /// I - The user of the \p U.
656 /// Returns true if the value should be tracked transitively.
657 ///
658 template <class AAType, typename StateType = typename AAType::StateType>
659 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
660                              Instruction &CtxI) {
661 
662   // Container for (transitive) uses of the associated value.
663   SetVector<const Use *> Uses;
664   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
665     Uses.insert(&U);
666 
667   MustBeExecutedContextExplorer &Explorer =
668       A.getInfoCache().getMustBeExecutedContextExplorer();
669 
670   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
671 
672   if (S.isAtFixpoint())
673     return;
674 
675   SmallVector<const BranchInst *, 4> BrInsts;
676   auto Pred = [&](const Instruction *I) {
677     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
678       if (Br->isConditional())
679         BrInsts.push_back(Br);
680     return true;
681   };
682 
683   // Here, accumulate conditional branch instructions in the context. We
684   // explore the child paths and collect the known states. The disjunction of
685   // those states can be merged to its own state. Let ParentState_i be a state
686   // to indicate the known information for an i-th branch instruction in the
687   // context. ChildStates are created for its successors respectively.
688   //
689   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
690   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
691   //      ...
692   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
693   //
694   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
695   //
696   // FIXME: Currently, recursive branches are not handled. For example, we
697   // can't deduce that ptr must be dereferenced in below function.
698   //
699   // void f(int a, int c, int *ptr) {
700   //    if(a)
701   //      if (b) {
702   //        *ptr = 0;
703   //      } else {
704   //        *ptr = 1;
705   //      }
706   //    else {
707   //      if (b) {
708   //        *ptr = 0;
709   //      } else {
710   //        *ptr = 1;
711   //      }
712   //    }
713   // }
714 
715   Explorer.checkForAllContext(&CtxI, Pred);
716   for (const BranchInst *Br : BrInsts) {
717     StateType ParentState;
718 
719     // The known state of the parent state is a conjunction of children's
720     // known states so it is initialized with a best state.
721     ParentState.indicateOptimisticFixpoint();
722 
723     for (const BasicBlock *BB : Br->successors()) {
724       StateType ChildState;
725 
726       size_t BeforeSize = Uses.size();
727       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
728 
729       // Erase uses which only appear in the child.
730       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
731         It = Uses.erase(It);
732 
733       ParentState &= ChildState;
734     }
735 
736     // Use only known state.
737     S += ParentState;
738   }
739 }
740 
741 /// -----------------------NoUnwind Function Attribute--------------------------
742 
743 struct AANoUnwindImpl : AANoUnwind {
744   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
745 
746   const std::string getAsStr() const override {
747     return getAssumed() ? "nounwind" : "may-unwind";
748   }
749 
750   /// See AbstractAttribute::updateImpl(...).
751   ChangeStatus updateImpl(Attributor &A) override {
752     auto Opcodes = {
753         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
754         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
755         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
756 
757     auto CheckForNoUnwind = [&](Instruction &I) {
758       if (!I.mayThrow())
759         return true;
760 
761       if (const auto *CB = dyn_cast<CallBase>(&I)) {
762         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
763             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
764         return NoUnwindAA.isAssumedNoUnwind();
765       }
766       return false;
767     };
768 
769     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
770       return indicatePessimisticFixpoint();
771 
772     return ChangeStatus::UNCHANGED;
773   }
774 };
775 
776 struct AANoUnwindFunction final : public AANoUnwindImpl {
777   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
778       : AANoUnwindImpl(IRP, A) {}
779 
780   /// See AbstractAttribute::trackStatistics()
781   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
782 };
783 
784 /// NoUnwind attribute deduction for a call sites.
785 struct AANoUnwindCallSite final : AANoUnwindImpl {
786   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
787       : AANoUnwindImpl(IRP, A) {}
788 
789   /// See AbstractAttribute::initialize(...).
790   void initialize(Attributor &A) override {
791     AANoUnwindImpl::initialize(A);
792     Function *F = getAssociatedFunction();
793     if (!F || F->isDeclaration())
794       indicatePessimisticFixpoint();
795   }
796 
797   /// See AbstractAttribute::updateImpl(...).
798   ChangeStatus updateImpl(Attributor &A) override {
799     // TODO: Once we have call site specific value information we can provide
800     //       call site specific liveness information and then it makes
801     //       sense to specialize attributes for call sites arguments instead of
802     //       redirecting requests to the callee argument.
803     Function *F = getAssociatedFunction();
804     const IRPosition &FnPos = IRPosition::function(*F);
805     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
806     return clampStateAndIndicateChange(getState(), FnAA.getState());
807   }
808 
809   /// See AbstractAttribute::trackStatistics()
810   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
811 };
812 
813 /// --------------------- Function Return Values -------------------------------
814 
815 /// "Attribute" that collects all potential returned values and the return
816 /// instructions that they arise from.
817 ///
818 /// If there is a unique returned value R, the manifest method will:
819 ///   - mark R with the "returned" attribute, if R is an argument.
820 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
821 
822   /// Mapping of values potentially returned by the associated function to the
823   /// return instructions that might return them.
824   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
825 
826   /// Mapping to remember the number of returned values for a call site such
827   /// that we can avoid updates if nothing changed.
828   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
829 
830   /// Set of unresolved calls returned by the associated function.
831   SmallSetVector<CallBase *, 4> UnresolvedCalls;
832 
833   /// State flags
834   ///
835   ///{
836   bool IsFixed = false;
837   bool IsValidState = true;
838   ///}
839 
840 public:
841   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
842       : AAReturnedValues(IRP, A) {}
843 
844   /// See AbstractAttribute::initialize(...).
845   void initialize(Attributor &A) override {
846     // Reset the state.
847     IsFixed = false;
848     IsValidState = true;
849     ReturnedValues.clear();
850 
851     Function *F = getAssociatedFunction();
852     if (!F || F->isDeclaration()) {
853       indicatePessimisticFixpoint();
854       return;
855     }
856     assert(!F->getReturnType()->isVoidTy() &&
857            "Did not expect a void return type!");
858 
859     // The map from instruction opcodes to those instructions in the function.
860     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
861 
862     // Look through all arguments, if one is marked as returned we are done.
863     for (Argument &Arg : F->args()) {
864       if (Arg.hasReturnedAttr()) {
865         auto &ReturnInstSet = ReturnedValues[&Arg];
866         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
867           for (Instruction *RI : *Insts)
868             ReturnInstSet.insert(cast<ReturnInst>(RI));
869 
870         indicateOptimisticFixpoint();
871         return;
872       }
873     }
874 
875     if (!A.isFunctionIPOAmendable(*F))
876       indicatePessimisticFixpoint();
877   }
878 
879   /// See AbstractAttribute::manifest(...).
880   ChangeStatus manifest(Attributor &A) override;
881 
882   /// See AbstractAttribute::getState(...).
883   AbstractState &getState() override { return *this; }
884 
885   /// See AbstractAttribute::getState(...).
886   const AbstractState &getState() const override { return *this; }
887 
888   /// See AbstractAttribute::updateImpl(Attributor &A).
889   ChangeStatus updateImpl(Attributor &A) override;
890 
891   llvm::iterator_range<iterator> returned_values() override {
892     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
893   }
894 
895   llvm::iterator_range<const_iterator> returned_values() const override {
896     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
897   }
898 
899   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
900     return UnresolvedCalls;
901   }
902 
903   /// Return the number of potential return values, -1 if unknown.
904   size_t getNumReturnValues() const override {
905     return isValidState() ? ReturnedValues.size() : -1;
906   }
907 
908   /// Return an assumed unique return value if a single candidate is found. If
909   /// there cannot be one, return a nullptr. If it is not clear yet, return the
910   /// Optional::NoneType.
911   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
912 
913   /// See AbstractState::checkForAllReturnedValues(...).
914   bool checkForAllReturnedValuesAndReturnInsts(
915       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
916       const override;
917 
918   /// Pretty print the attribute similar to the IR representation.
919   const std::string getAsStr() const override;
920 
921   /// See AbstractState::isAtFixpoint().
922   bool isAtFixpoint() const override { return IsFixed; }
923 
924   /// See AbstractState::isValidState().
925   bool isValidState() const override { return IsValidState; }
926 
927   /// See AbstractState::indicateOptimisticFixpoint(...).
928   ChangeStatus indicateOptimisticFixpoint() override {
929     IsFixed = true;
930     return ChangeStatus::UNCHANGED;
931   }
932 
933   ChangeStatus indicatePessimisticFixpoint() override {
934     IsFixed = true;
935     IsValidState = false;
936     return ChangeStatus::CHANGED;
937   }
938 };
939 
940 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
941   ChangeStatus Changed = ChangeStatus::UNCHANGED;
942 
943   // Bookkeeping.
944   assert(isValidState());
945   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
946                   "Number of function with known return values");
947 
948   // Check if we have an assumed unique return value that we could manifest.
949   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
950 
951   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
952     return Changed;
953 
954   // Bookkeeping.
955   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
956                   "Number of function with unique return");
957 
958   // Callback to replace the uses of CB with the constant C.
959   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
960     if (CB.use_empty())
961       return ChangeStatus::UNCHANGED;
962     if (A.changeValueAfterManifest(CB, C))
963       return ChangeStatus::CHANGED;
964     return ChangeStatus::UNCHANGED;
965   };
966 
967   // If the assumed unique return value is an argument, annotate it.
968   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
969     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
970             getAssociatedFunction()->getReturnType())) {
971       getIRPosition() = IRPosition::argument(*UniqueRVArg);
972       Changed = IRAttribute::manifest(A);
973     }
974   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
975     // We can replace the returned value with the unique returned constant.
976     Value &AnchorValue = getAnchorValue();
977     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
978       for (const Use &U : F->uses())
979         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
980           if (CB->isCallee(&U)) {
981             Constant *RVCCast =
982                 CB->getType() == RVC->getType()
983                     ? RVC
984                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
985             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
986           }
987     } else {
988       assert(isa<CallBase>(AnchorValue) &&
989              "Expcected a function or call base anchor!");
990       Constant *RVCCast =
991           AnchorValue.getType() == RVC->getType()
992               ? RVC
993               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
994       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
995     }
996     if (Changed == ChangeStatus::CHANGED)
997       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
998                       "Number of function returns replaced by constant return");
999   }
1000 
1001   return Changed;
1002 }
1003 
1004 const std::string AAReturnedValuesImpl::getAsStr() const {
1005   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1006          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1007          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1008 }
1009 
1010 Optional<Value *>
1011 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1012   // If checkForAllReturnedValues provides a unique value, ignoring potential
1013   // undef values that can also be present, it is assumed to be the actual
1014   // return value and forwarded to the caller of this method. If there are
1015   // multiple, a nullptr is returned indicating there cannot be a unique
1016   // returned value.
1017   Optional<Value *> UniqueRV;
1018 
1019   auto Pred = [&](Value &RV) -> bool {
1020     // If we found a second returned value and neither the current nor the saved
1021     // one is an undef, there is no unique returned value. Undefs are special
1022     // since we can pretend they have any value.
1023     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1024         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1025       UniqueRV = nullptr;
1026       return false;
1027     }
1028 
1029     // Do not overwrite a value with an undef.
1030     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1031       UniqueRV = &RV;
1032 
1033     return true;
1034   };
1035 
1036   if (!A.checkForAllReturnedValues(Pred, *this))
1037     UniqueRV = nullptr;
1038 
1039   return UniqueRV;
1040 }
1041 
1042 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1043     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1044     const {
1045   if (!isValidState())
1046     return false;
1047 
1048   // Check all returned values but ignore call sites as long as we have not
1049   // encountered an overdefined one during an update.
1050   for (auto &It : ReturnedValues) {
1051     Value *RV = It.first;
1052 
1053     CallBase *CB = dyn_cast<CallBase>(RV);
1054     if (CB && !UnresolvedCalls.count(CB))
1055       continue;
1056 
1057     if (!Pred(*RV, It.second))
1058       return false;
1059   }
1060 
1061   return true;
1062 }
1063 
1064 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1065   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1066   bool Changed = false;
1067 
1068   // State used in the value traversals starting in returned values.
1069   struct RVState {
1070     // The map in which we collect return values -> return instrs.
1071     decltype(ReturnedValues) &RetValsMap;
1072     // The flag to indicate a change.
1073     bool &Changed;
1074     // The return instrs we come from.
1075     SmallSetVector<ReturnInst *, 4> RetInsts;
1076   };
1077 
1078   // Callback for a leaf value returned by the associated function.
1079   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1080                          bool) -> bool {
1081     auto Size = RVS.RetValsMap[&Val].size();
1082     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1083     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1084     RVS.Changed |= Inserted;
1085     LLVM_DEBUG({
1086       if (Inserted)
1087         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1088                << " => " << RVS.RetInsts.size() << "\n";
1089     });
1090     return true;
1091   };
1092 
1093   // Helper method to invoke the generic value traversal.
1094   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1095                                 const Instruction *CtxI) {
1096     IRPosition RetValPos = IRPosition::value(RV);
1097     return genericValueTraversal<AAReturnedValues, RVState>(
1098         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1099         /* UseValueSimplify */ false);
1100   };
1101 
1102   // Callback for all "return intructions" live in the associated function.
1103   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1104     ReturnInst &Ret = cast<ReturnInst>(I);
1105     RVState RVS({ReturnedValues, Changed, {}});
1106     RVS.RetInsts.insert(&Ret);
1107     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1108   };
1109 
1110   // Start by discovering returned values from all live returned instructions in
1111   // the associated function.
1112   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1113     return indicatePessimisticFixpoint();
1114 
1115   // Once returned values "directly" present in the code are handled we try to
1116   // resolve returned calls. To avoid modifications to the ReturnedValues map
1117   // while we iterate over it we kept record of potential new entries in a copy
1118   // map, NewRVsMap.
1119   decltype(ReturnedValues) NewRVsMap;
1120 
1121   auto HandleReturnValue = [&](Value *RV,
1122                                SmallSetVector<ReturnInst *, 4> &RIs) {
1123     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1124                       << RIs.size() << " RIs\n");
1125     CallBase *CB = dyn_cast<CallBase>(RV);
1126     if (!CB || UnresolvedCalls.count(CB))
1127       return;
1128 
1129     if (!CB->getCalledFunction()) {
1130       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1131                         << "\n");
1132       UnresolvedCalls.insert(CB);
1133       return;
1134     }
1135 
1136     // TODO: use the function scope once we have call site AAReturnedValues.
1137     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1138         *this, IRPosition::function(*CB->getCalledFunction()),
1139         DepClassTy::REQUIRED);
1140     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1141                       << RetValAA << "\n");
1142 
1143     // Skip dead ends, thus if we do not know anything about the returned
1144     // call we mark it as unresolved and it will stay that way.
1145     if (!RetValAA.getState().isValidState()) {
1146       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1147                         << "\n");
1148       UnresolvedCalls.insert(CB);
1149       return;
1150     }
1151 
1152     // Do not try to learn partial information. If the callee has unresolved
1153     // return values we will treat the call as unresolved/opaque.
1154     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1155     if (!RetValAAUnresolvedCalls.empty()) {
1156       UnresolvedCalls.insert(CB);
1157       return;
1158     }
1159 
1160     // Now check if we can track transitively returned values. If possible, thus
1161     // if all return value can be represented in the current scope, do so.
1162     bool Unresolved = false;
1163     for (auto &RetValAAIt : RetValAA.returned_values()) {
1164       Value *RetVal = RetValAAIt.first;
1165       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1166           isa<Constant>(RetVal))
1167         continue;
1168       // Anything that did not fit in the above categories cannot be resolved,
1169       // mark the call as unresolved.
1170       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1171                            "cannot be translated: "
1172                         << *RetVal << "\n");
1173       UnresolvedCalls.insert(CB);
1174       Unresolved = true;
1175       break;
1176     }
1177 
1178     if (Unresolved)
1179       return;
1180 
1181     // Now track transitively returned values.
1182     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1183     if (NumRetAA == RetValAA.getNumReturnValues()) {
1184       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1185                            "changed since it was seen last\n");
1186       return;
1187     }
1188     NumRetAA = RetValAA.getNumReturnValues();
1189 
1190     for (auto &RetValAAIt : RetValAA.returned_values()) {
1191       Value *RetVal = RetValAAIt.first;
1192       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1193         // Arguments are mapped to call site operands and we begin the traversal
1194         // again.
1195         bool Unused = false;
1196         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1197         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1198         continue;
1199       }
1200       if (isa<CallBase>(RetVal)) {
1201         // Call sites are resolved by the callee attribute over time, no need to
1202         // do anything for us.
1203         continue;
1204       }
1205       if (isa<Constant>(RetVal)) {
1206         // Constants are valid everywhere, we can simply take them.
1207         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1208         continue;
1209       }
1210     }
1211   };
1212 
1213   for (auto &It : ReturnedValues)
1214     HandleReturnValue(It.first, It.second);
1215 
1216   // Because processing the new information can again lead to new return values
1217   // we have to be careful and iterate until this iteration is complete. The
1218   // idea is that we are in a stable state at the end of an update. All return
1219   // values have been handled and properly categorized. We might not update
1220   // again if we have not requested a non-fix attribute so we cannot "wait" for
1221   // the next update to analyze a new return value.
1222   while (!NewRVsMap.empty()) {
1223     auto It = std::move(NewRVsMap.back());
1224     NewRVsMap.pop_back();
1225 
1226     assert(!It.second.empty() && "Entry does not add anything.");
1227     auto &ReturnInsts = ReturnedValues[It.first];
1228     for (ReturnInst *RI : It.second)
1229       if (ReturnInsts.insert(RI)) {
1230         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1231                           << *It.first << " => " << *RI << "\n");
1232         HandleReturnValue(It.first, ReturnInsts);
1233         Changed = true;
1234       }
1235   }
1236 
1237   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1238   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1239 }
1240 
1241 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1242   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1243       : AAReturnedValuesImpl(IRP, A) {}
1244 
1245   /// See AbstractAttribute::trackStatistics()
1246   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1247 };
1248 
1249 /// Returned values information for a call sites.
1250 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1251   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1252       : AAReturnedValuesImpl(IRP, A) {}
1253 
1254   /// See AbstractAttribute::initialize(...).
1255   void initialize(Attributor &A) override {
1256     // TODO: Once we have call site specific value information we can provide
1257     //       call site specific liveness information and then it makes
1258     //       sense to specialize attributes for call sites instead of
1259     //       redirecting requests to the callee.
1260     llvm_unreachable("Abstract attributes for returned values are not "
1261                      "supported for call sites yet!");
1262   }
1263 
1264   /// See AbstractAttribute::updateImpl(...).
1265   ChangeStatus updateImpl(Attributor &A) override {
1266     return indicatePessimisticFixpoint();
1267   }
1268 
1269   /// See AbstractAttribute::trackStatistics()
1270   void trackStatistics() const override {}
1271 };
1272 
1273 /// ------------------------ NoSync Function Attribute -------------------------
1274 
1275 struct AANoSyncImpl : AANoSync {
1276   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1277 
1278   const std::string getAsStr() const override {
1279     return getAssumed() ? "nosync" : "may-sync";
1280   }
1281 
1282   /// See AbstractAttribute::updateImpl(...).
1283   ChangeStatus updateImpl(Attributor &A) override;
1284 
1285   /// Helper function used to determine whether an instruction is non-relaxed
1286   /// atomic. In other words, if an atomic instruction does not have unordered
1287   /// or monotonic ordering
1288   static bool isNonRelaxedAtomic(Instruction *I);
1289 
1290   /// Helper function used to determine whether an instruction is volatile.
1291   static bool isVolatile(Instruction *I);
1292 
1293   /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1294   /// memset).
1295   static bool isNoSyncIntrinsic(Instruction *I);
1296 };
1297 
1298 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1299   if (!I->isAtomic())
1300     return false;
1301 
1302   AtomicOrdering Ordering;
1303   switch (I->getOpcode()) {
1304   case Instruction::AtomicRMW:
1305     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1306     break;
1307   case Instruction::Store:
1308     Ordering = cast<StoreInst>(I)->getOrdering();
1309     break;
1310   case Instruction::Load:
1311     Ordering = cast<LoadInst>(I)->getOrdering();
1312     break;
1313   case Instruction::Fence: {
1314     auto *FI = cast<FenceInst>(I);
1315     if (FI->getSyncScopeID() == SyncScope::SingleThread)
1316       return false;
1317     Ordering = FI->getOrdering();
1318     break;
1319   }
1320   case Instruction::AtomicCmpXchg: {
1321     AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
1322     AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
1323     // Only if both are relaxed, than it can be treated as relaxed.
1324     // Otherwise it is non-relaxed.
1325     if (Success != AtomicOrdering::Unordered &&
1326         Success != AtomicOrdering::Monotonic)
1327       return true;
1328     if (Failure != AtomicOrdering::Unordered &&
1329         Failure != AtomicOrdering::Monotonic)
1330       return true;
1331     return false;
1332   }
1333   default:
1334     llvm_unreachable(
1335         "New atomic operations need to be known in the attributor.");
1336   }
1337 
1338   // Relaxed.
1339   if (Ordering == AtomicOrdering::Unordered ||
1340       Ordering == AtomicOrdering::Monotonic)
1341     return false;
1342   return true;
1343 }
1344 
1345 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1346 /// FIXME: We should ipmrove the handling of intrinsics.
1347 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1348   if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1349     switch (II->getIntrinsicID()) {
1350     /// Element wise atomic memory intrinsics are can only be unordered,
1351     /// therefore nosync.
1352     case Intrinsic::memset_element_unordered_atomic:
1353     case Intrinsic::memmove_element_unordered_atomic:
1354     case Intrinsic::memcpy_element_unordered_atomic:
1355       return true;
1356     case Intrinsic::memset:
1357     case Intrinsic::memmove:
1358     case Intrinsic::memcpy:
1359       if (!cast<MemIntrinsic>(II)->isVolatile())
1360         return true;
1361       return false;
1362     default:
1363       return false;
1364     }
1365   }
1366   return false;
1367 }
1368 
1369 bool AANoSyncImpl::isVolatile(Instruction *I) {
1370   assert(!isa<CallBase>(I) && "Calls should not be checked here");
1371 
1372   switch (I->getOpcode()) {
1373   case Instruction::AtomicRMW:
1374     return cast<AtomicRMWInst>(I)->isVolatile();
1375   case Instruction::Store:
1376     return cast<StoreInst>(I)->isVolatile();
1377   case Instruction::Load:
1378     return cast<LoadInst>(I)->isVolatile();
1379   case Instruction::AtomicCmpXchg:
1380     return cast<AtomicCmpXchgInst>(I)->isVolatile();
1381   default:
1382     return false;
1383   }
1384 }
1385 
1386 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1387 
1388   auto CheckRWInstForNoSync = [&](Instruction &I) {
1389     /// We are looking for volatile instructions or Non-Relaxed atomics.
1390     /// FIXME: We should improve the handling of intrinsics.
1391 
1392     if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
1393       return true;
1394 
1395     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1396       if (CB->hasFnAttr(Attribute::NoSync))
1397         return true;
1398 
1399       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1400           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1401       return NoSyncAA.isAssumedNoSync();
1402     }
1403 
1404     if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
1405       return true;
1406 
1407     return false;
1408   };
1409 
1410   auto CheckForNoSync = [&](Instruction &I) {
1411     // At this point we handled all read/write effects and they are all
1412     // nosync, so they can be skipped.
1413     if (I.mayReadOrWriteMemory())
1414       return true;
1415 
1416     // non-convergent and readnone imply nosync.
1417     return !cast<CallBase>(I).isConvergent();
1418   };
1419 
1420   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1421       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1422     return indicatePessimisticFixpoint();
1423 
1424   return ChangeStatus::UNCHANGED;
1425 }
1426 
1427 struct AANoSyncFunction final : public AANoSyncImpl {
1428   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1429       : AANoSyncImpl(IRP, A) {}
1430 
1431   /// See AbstractAttribute::trackStatistics()
1432   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1433 };
1434 
1435 /// NoSync attribute deduction for a call sites.
1436 struct AANoSyncCallSite final : AANoSyncImpl {
1437   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1438       : AANoSyncImpl(IRP, A) {}
1439 
1440   /// See AbstractAttribute::initialize(...).
1441   void initialize(Attributor &A) override {
1442     AANoSyncImpl::initialize(A);
1443     Function *F = getAssociatedFunction();
1444     if (!F || F->isDeclaration())
1445       indicatePessimisticFixpoint();
1446   }
1447 
1448   /// See AbstractAttribute::updateImpl(...).
1449   ChangeStatus updateImpl(Attributor &A) override {
1450     // TODO: Once we have call site specific value information we can provide
1451     //       call site specific liveness information and then it makes
1452     //       sense to specialize attributes for call sites arguments instead of
1453     //       redirecting requests to the callee argument.
1454     Function *F = getAssociatedFunction();
1455     const IRPosition &FnPos = IRPosition::function(*F);
1456     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1457     return clampStateAndIndicateChange(getState(), FnAA.getState());
1458   }
1459 
1460   /// See AbstractAttribute::trackStatistics()
1461   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1462 };
1463 
1464 /// ------------------------ No-Free Attributes ----------------------------
1465 
1466 struct AANoFreeImpl : public AANoFree {
1467   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1468 
1469   /// See AbstractAttribute::updateImpl(...).
1470   ChangeStatus updateImpl(Attributor &A) override {
1471     auto CheckForNoFree = [&](Instruction &I) {
1472       const auto &CB = cast<CallBase>(I);
1473       if (CB.hasFnAttr(Attribute::NoFree))
1474         return true;
1475 
1476       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1477           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1478       return NoFreeAA.isAssumedNoFree();
1479     };
1480 
1481     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1482       return indicatePessimisticFixpoint();
1483     return ChangeStatus::UNCHANGED;
1484   }
1485 
1486   /// See AbstractAttribute::getAsStr().
1487   const std::string getAsStr() const override {
1488     return getAssumed() ? "nofree" : "may-free";
1489   }
1490 };
1491 
1492 struct AANoFreeFunction final : public AANoFreeImpl {
1493   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1494       : AANoFreeImpl(IRP, A) {}
1495 
1496   /// See AbstractAttribute::trackStatistics()
1497   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1498 };
1499 
1500 /// NoFree attribute deduction for a call sites.
1501 struct AANoFreeCallSite final : AANoFreeImpl {
1502   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1503       : AANoFreeImpl(IRP, A) {}
1504 
1505   /// See AbstractAttribute::initialize(...).
1506   void initialize(Attributor &A) override {
1507     AANoFreeImpl::initialize(A);
1508     Function *F = getAssociatedFunction();
1509     if (!F || F->isDeclaration())
1510       indicatePessimisticFixpoint();
1511   }
1512 
1513   /// See AbstractAttribute::updateImpl(...).
1514   ChangeStatus updateImpl(Attributor &A) override {
1515     // TODO: Once we have call site specific value information we can provide
1516     //       call site specific liveness information and then it makes
1517     //       sense to specialize attributes for call sites arguments instead of
1518     //       redirecting requests to the callee argument.
1519     Function *F = getAssociatedFunction();
1520     const IRPosition &FnPos = IRPosition::function(*F);
1521     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1522     return clampStateAndIndicateChange(getState(), FnAA.getState());
1523   }
1524 
1525   /// See AbstractAttribute::trackStatistics()
1526   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1527 };
1528 
1529 /// NoFree attribute for floating values.
1530 struct AANoFreeFloating : AANoFreeImpl {
1531   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1532       : AANoFreeImpl(IRP, A) {}
1533 
1534   /// See AbstractAttribute::trackStatistics()
1535   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1536 
1537   /// See Abstract Attribute::updateImpl(...).
1538   ChangeStatus updateImpl(Attributor &A) override {
1539     const IRPosition &IRP = getIRPosition();
1540 
1541     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1542         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1543     if (NoFreeAA.isAssumedNoFree())
1544       return ChangeStatus::UNCHANGED;
1545 
1546     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1547     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1548       Instruction *UserI = cast<Instruction>(U.getUser());
1549       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1550         if (CB->isBundleOperand(&U))
1551           return false;
1552         if (!CB->isArgOperand(&U))
1553           return true;
1554         unsigned ArgNo = CB->getArgOperandNo(&U);
1555 
1556         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1557             *this, IRPosition::callsite_argument(*CB, ArgNo),
1558             DepClassTy::REQUIRED);
1559         return NoFreeArg.isAssumedNoFree();
1560       }
1561 
1562       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1563           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1564         Follow = true;
1565         return true;
1566       }
1567       if (isa<ReturnInst>(UserI))
1568         return true;
1569 
1570       // Unknown user.
1571       return false;
1572     };
1573     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1574       return indicatePessimisticFixpoint();
1575 
1576     return ChangeStatus::UNCHANGED;
1577   }
1578 };
1579 
1580 /// NoFree attribute for a call site argument.
1581 struct AANoFreeArgument final : AANoFreeFloating {
1582   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1583       : AANoFreeFloating(IRP, A) {}
1584 
1585   /// See AbstractAttribute::trackStatistics()
1586   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1587 };
1588 
1589 /// NoFree attribute for call site arguments.
1590 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1591   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1592       : AANoFreeFloating(IRP, A) {}
1593 
1594   /// See AbstractAttribute::updateImpl(...).
1595   ChangeStatus updateImpl(Attributor &A) override {
1596     // TODO: Once we have call site specific value information we can provide
1597     //       call site specific liveness information and then it makes
1598     //       sense to specialize attributes for call sites arguments instead of
1599     //       redirecting requests to the callee argument.
1600     Argument *Arg = getAssociatedArgument();
1601     if (!Arg)
1602       return indicatePessimisticFixpoint();
1603     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1604     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1605     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1606   }
1607 
1608   /// See AbstractAttribute::trackStatistics()
1609   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)};
1610 };
1611 
1612 /// NoFree attribute for function return value.
1613 struct AANoFreeReturned final : AANoFreeFloating {
1614   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1615       : AANoFreeFloating(IRP, A) {
1616     llvm_unreachable("NoFree is not applicable to function returns!");
1617   }
1618 
1619   /// See AbstractAttribute::initialize(...).
1620   void initialize(Attributor &A) override {
1621     llvm_unreachable("NoFree is not applicable to function returns!");
1622   }
1623 
1624   /// See AbstractAttribute::updateImpl(...).
1625   ChangeStatus updateImpl(Attributor &A) override {
1626     llvm_unreachable("NoFree is not applicable to function returns!");
1627   }
1628 
1629   /// See AbstractAttribute::trackStatistics()
1630   void trackStatistics() const override {}
1631 };
1632 
1633 /// NoFree attribute deduction for a call site return value.
1634 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1635   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1636       : AANoFreeFloating(IRP, A) {}
1637 
1638   ChangeStatus manifest(Attributor &A) override {
1639     return ChangeStatus::UNCHANGED;
1640   }
1641   /// See AbstractAttribute::trackStatistics()
1642   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1643 };
1644 
1645 /// ------------------------ NonNull Argument Attribute ------------------------
1646 static int64_t getKnownNonNullAndDerefBytesForUse(
1647     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1648     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1649   TrackUse = false;
1650 
1651   const Value *UseV = U->get();
1652   if (!UseV->getType()->isPointerTy())
1653     return 0;
1654 
1655   // We need to follow common pointer manipulation uses to the accesses they
1656   // feed into. We can try to be smart to avoid looking through things we do not
1657   // like for now, e.g., non-inbounds GEPs.
1658   if (isa<CastInst>(I)) {
1659     TrackUse = true;
1660     return 0;
1661   }
1662 
1663   if (isa<GetElementPtrInst>(I)) {
1664     TrackUse = true;
1665     return 0;
1666   }
1667 
1668   Type *PtrTy = UseV->getType();
1669   const Function *F = I->getFunction();
1670   bool NullPointerIsDefined =
1671       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1672   const DataLayout &DL = A.getInfoCache().getDL();
1673   if (const auto *CB = dyn_cast<CallBase>(I)) {
1674     if (CB->isBundleOperand(U)) {
1675       if (RetainedKnowledge RK = getKnowledgeFromUse(
1676               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1677         IsNonNull |=
1678             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1679         return RK.ArgValue;
1680       }
1681       return 0;
1682     }
1683 
1684     if (CB->isCallee(U)) {
1685       IsNonNull |= !NullPointerIsDefined;
1686       return 0;
1687     }
1688 
1689     unsigned ArgNo = CB->getArgOperandNo(U);
1690     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1691     // As long as we only use known information there is no need to track
1692     // dependences here.
1693     auto &DerefAA =
1694         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1695     IsNonNull |= DerefAA.isKnownNonNull();
1696     return DerefAA.getKnownDereferenceableBytes();
1697   }
1698 
1699   int64_t Offset;
1700   const Value *Base =
1701       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1702   if (Base) {
1703     if (Base == &AssociatedValue &&
1704         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1705       int64_t DerefBytes =
1706           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1707 
1708       IsNonNull |= !NullPointerIsDefined;
1709       return std::max(int64_t(0), DerefBytes);
1710     }
1711   }
1712 
1713   /// Corner case when an offset is 0.
1714   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1715                                               /*AllowNonInbounds*/ true);
1716   if (Base) {
1717     if (Offset == 0 && Base == &AssociatedValue &&
1718         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1719       int64_t DerefBytes =
1720           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1721       IsNonNull |= !NullPointerIsDefined;
1722       return std::max(int64_t(0), DerefBytes);
1723     }
1724   }
1725 
1726   return 0;
1727 }
1728 
1729 struct AANonNullImpl : AANonNull {
1730   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1731       : AANonNull(IRP, A),
1732         NullIsDefined(NullPointerIsDefined(
1733             getAnchorScope(),
1734             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1735 
1736   /// See AbstractAttribute::initialize(...).
1737   void initialize(Attributor &A) override {
1738     Value &V = getAssociatedValue();
1739     if (!NullIsDefined &&
1740         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1741                 /* IgnoreSubsumingPositions */ false, &A)) {
1742       indicateOptimisticFixpoint();
1743       return;
1744     }
1745 
1746     if (isa<ConstantPointerNull>(V)) {
1747       indicatePessimisticFixpoint();
1748       return;
1749     }
1750 
1751     AANonNull::initialize(A);
1752 
1753     bool CanBeNull, CanBeFreed;
1754     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
1755                                          CanBeFreed)) {
1756       if (!CanBeNull) {
1757         indicateOptimisticFixpoint();
1758         return;
1759       }
1760     }
1761 
1762     if (isa<GlobalValue>(&getAssociatedValue())) {
1763       indicatePessimisticFixpoint();
1764       return;
1765     }
1766 
1767     if (Instruction *CtxI = getCtxI())
1768       followUsesInMBEC(*this, A, getState(), *CtxI);
1769   }
1770 
1771   /// See followUsesInMBEC
1772   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1773                        AANonNull::StateType &State) {
1774     bool IsNonNull = false;
1775     bool TrackUse = false;
1776     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1777                                        IsNonNull, TrackUse);
1778     State.setKnown(IsNonNull);
1779     return TrackUse;
1780   }
1781 
1782   /// See AbstractAttribute::getAsStr().
1783   const std::string getAsStr() const override {
1784     return getAssumed() ? "nonnull" : "may-null";
1785   }
1786 
1787   /// Flag to determine if the underlying value can be null and still allow
1788   /// valid accesses.
1789   const bool NullIsDefined;
1790 };
1791 
1792 /// NonNull attribute for a floating value.
1793 struct AANonNullFloating : public AANonNullImpl {
1794   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1795       : AANonNullImpl(IRP, A) {}
1796 
1797   /// See AbstractAttribute::updateImpl(...).
1798   ChangeStatus updateImpl(Attributor &A) override {
1799     const DataLayout &DL = A.getDataLayout();
1800 
1801     DominatorTree *DT = nullptr;
1802     AssumptionCache *AC = nullptr;
1803     InformationCache &InfoCache = A.getInfoCache();
1804     if (const Function *Fn = getAnchorScope()) {
1805       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1806       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1807     }
1808 
1809     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1810                             AANonNull::StateType &T, bool Stripped) -> bool {
1811       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1812                                              DepClassTy::REQUIRED);
1813       if (!Stripped && this == &AA) {
1814         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1815           T.indicatePessimisticFixpoint();
1816       } else {
1817         // Use abstract attribute information.
1818         const AANonNull::StateType &NS = AA.getState();
1819         T ^= NS;
1820       }
1821       return T.isValidState();
1822     };
1823 
1824     StateType T;
1825     if (!genericValueTraversal<AANonNull, StateType>(
1826             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1827       return indicatePessimisticFixpoint();
1828 
1829     return clampStateAndIndicateChange(getState(), T);
1830   }
1831 
1832   /// See AbstractAttribute::trackStatistics()
1833   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1834 };
1835 
1836 /// NonNull attribute for function return value.
1837 struct AANonNullReturned final
1838     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1839   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1840       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1841 
1842   /// See AbstractAttribute::getAsStr().
1843   const std::string getAsStr() const override {
1844     return getAssumed() ? "nonnull" : "may-null";
1845   }
1846 
1847   /// See AbstractAttribute::trackStatistics()
1848   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1849 };
1850 
1851 /// NonNull attribute for function argument.
1852 struct AANonNullArgument final
1853     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1854   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1855       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1856 
1857   /// See AbstractAttribute::trackStatistics()
1858   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1859 };
1860 
1861 struct AANonNullCallSiteArgument final : AANonNullFloating {
1862   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1863       : AANonNullFloating(IRP, A) {}
1864 
1865   /// See AbstractAttribute::trackStatistics()
1866   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1867 };
1868 
1869 /// NonNull attribute for a call site return position.
1870 struct AANonNullCallSiteReturned final
1871     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1872   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1873       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1874 
1875   /// See AbstractAttribute::trackStatistics()
1876   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1877 };
1878 
1879 /// ------------------------ No-Recurse Attributes ----------------------------
1880 
1881 struct AANoRecurseImpl : public AANoRecurse {
1882   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1883 
1884   /// See AbstractAttribute::getAsStr()
1885   const std::string getAsStr() const override {
1886     return getAssumed() ? "norecurse" : "may-recurse";
1887   }
1888 };
1889 
1890 struct AANoRecurseFunction final : AANoRecurseImpl {
1891   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1892       : AANoRecurseImpl(IRP, A) {}
1893 
1894   /// See AbstractAttribute::initialize(...).
1895   void initialize(Attributor &A) override {
1896     AANoRecurseImpl::initialize(A);
1897     if (const Function *F = getAnchorScope())
1898       if (A.getInfoCache().getSccSize(*F) != 1)
1899         indicatePessimisticFixpoint();
1900   }
1901 
1902   /// See AbstractAttribute::updateImpl(...).
1903   ChangeStatus updateImpl(Attributor &A) override {
1904 
1905     // If all live call sites are known to be no-recurse, we are as well.
1906     auto CallSitePred = [&](AbstractCallSite ACS) {
1907       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1908           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1909           DepClassTy::NONE);
1910       return NoRecurseAA.isKnownNoRecurse();
1911     };
1912     bool AllCallSitesKnown;
1913     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1914       // If we know all call sites and all are known no-recurse, we are done.
1915       // If all known call sites, which might not be all that exist, are known
1916       // to be no-recurse, we are not done but we can continue to assume
1917       // no-recurse. If one of the call sites we have not visited will become
1918       // live, another update is triggered.
1919       if (AllCallSitesKnown)
1920         indicateOptimisticFixpoint();
1921       return ChangeStatus::UNCHANGED;
1922     }
1923 
1924     // If the above check does not hold anymore we look at the calls.
1925     auto CheckForNoRecurse = [&](Instruction &I) {
1926       const auto &CB = cast<CallBase>(I);
1927       if (CB.hasFnAttr(Attribute::NoRecurse))
1928         return true;
1929 
1930       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1931           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1932       if (!NoRecurseAA.isAssumedNoRecurse())
1933         return false;
1934 
1935       // Recursion to the same function
1936       if (CB.getCalledFunction() == getAnchorScope())
1937         return false;
1938 
1939       return true;
1940     };
1941 
1942     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1943       return indicatePessimisticFixpoint();
1944     return ChangeStatus::UNCHANGED;
1945   }
1946 
1947   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1948 };
1949 
1950 /// NoRecurse attribute deduction for a call sites.
1951 struct AANoRecurseCallSite final : AANoRecurseImpl {
1952   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1953       : AANoRecurseImpl(IRP, A) {}
1954 
1955   /// See AbstractAttribute::initialize(...).
1956   void initialize(Attributor &A) override {
1957     AANoRecurseImpl::initialize(A);
1958     Function *F = getAssociatedFunction();
1959     if (!F || F->isDeclaration())
1960       indicatePessimisticFixpoint();
1961   }
1962 
1963   /// See AbstractAttribute::updateImpl(...).
1964   ChangeStatus updateImpl(Attributor &A) override {
1965     // TODO: Once we have call site specific value information we can provide
1966     //       call site specific liveness information and then it makes
1967     //       sense to specialize attributes for call sites arguments instead of
1968     //       redirecting requests to the callee argument.
1969     Function *F = getAssociatedFunction();
1970     const IRPosition &FnPos = IRPosition::function(*F);
1971     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1972     return clampStateAndIndicateChange(getState(), FnAA.getState());
1973   }
1974 
1975   /// See AbstractAttribute::trackStatistics()
1976   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1977 };
1978 
1979 /// -------------------- Undefined-Behavior Attributes ------------------------
1980 
1981 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1982   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1983       : AAUndefinedBehavior(IRP, A) {}
1984 
1985   /// See AbstractAttribute::updateImpl(...).
1986   // through a pointer (i.e. also branches etc.)
1987   ChangeStatus updateImpl(Attributor &A) override {
1988     const size_t UBPrevSize = KnownUBInsts.size();
1989     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1990 
1991     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1992       // Skip instructions that are already saved.
1993       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1994         return true;
1995 
1996       // If we reach here, we know we have an instruction
1997       // that accesses memory through a pointer operand,
1998       // for which getPointerOperand() should give it to us.
1999       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
2000       assert(PtrOp &&
2001              "Expected pointer operand of memory accessing instruction");
2002 
2003       // Either we stopped and the appropriate action was taken,
2004       // or we got back a simplified value to continue.
2005       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
2006       if (!SimplifiedPtrOp.hasValue())
2007         return true;
2008       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
2009 
2010       // A memory access through a pointer is considered UB
2011       // only if the pointer has constant null value.
2012       // TODO: Expand it to not only check constant values.
2013       if (!isa<ConstantPointerNull>(PtrOpVal)) {
2014         AssumedNoUBInsts.insert(&I);
2015         return true;
2016       }
2017       const Type *PtrTy = PtrOpVal->getType();
2018 
2019       // Because we only consider instructions inside functions,
2020       // assume that a parent function exists.
2021       const Function *F = I.getFunction();
2022 
2023       // A memory access using constant null pointer is only considered UB
2024       // if null pointer is _not_ defined for the target platform.
2025       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
2026         AssumedNoUBInsts.insert(&I);
2027       else
2028         KnownUBInsts.insert(&I);
2029       return true;
2030     };
2031 
2032     auto InspectBrInstForUB = [&](Instruction &I) {
2033       // A conditional branch instruction is considered UB if it has `undef`
2034       // condition.
2035 
2036       // Skip instructions that are already saved.
2037       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2038         return true;
2039 
2040       // We know we have a branch instruction.
2041       auto BrInst = cast<BranchInst>(&I);
2042 
2043       // Unconditional branches are never considered UB.
2044       if (BrInst->isUnconditional())
2045         return true;
2046 
2047       // Either we stopped and the appropriate action was taken,
2048       // or we got back a simplified value to continue.
2049       Optional<Value *> SimplifiedCond =
2050           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
2051       if (!SimplifiedCond.hasValue())
2052         return true;
2053       AssumedNoUBInsts.insert(&I);
2054       return true;
2055     };
2056 
2057     auto InspectCallSiteForUB = [&](Instruction &I) {
2058       // Check whether a callsite always cause UB or not
2059 
2060       // Skip instructions that are already saved.
2061       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2062         return true;
2063 
2064       // Check nonnull and noundef argument attribute violation for each
2065       // callsite.
2066       CallBase &CB = cast<CallBase>(I);
2067       Function *Callee = CB.getCalledFunction();
2068       if (!Callee)
2069         return true;
2070       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2071         // If current argument is known to be simplified to null pointer and the
2072         // corresponding argument position is known to have nonnull attribute,
2073         // the argument is poison. Furthermore, if the argument is poison and
2074         // the position is known to have noundef attriubte, this callsite is
2075         // considered UB.
2076         if (idx >= Callee->arg_size())
2077           break;
2078         Value *ArgVal = CB.getArgOperand(idx);
2079         if (!ArgVal)
2080           continue;
2081         // Here, we handle three cases.
2082         //   (1) Not having a value means it is dead. (we can replace the value
2083         //       with undef)
2084         //   (2) Simplified to undef. The argument violate noundef attriubte.
2085         //   (3) Simplified to null pointer where known to be nonnull.
2086         //       The argument is a poison value and violate noundef attribute.
2087         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2088         auto &NoUndefAA =
2089             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2090         if (!NoUndefAA.isKnownNoUndef())
2091           continue;
2092         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2093             *this, IRPosition::value(*ArgVal), DepClassTy::NONE);
2094         if (!ValueSimplifyAA.isKnown())
2095           continue;
2096         Optional<Value *> SimplifiedVal =
2097             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2098         if (!SimplifiedVal.hasValue() ||
2099             isa<UndefValue>(*SimplifiedVal.getValue())) {
2100           KnownUBInsts.insert(&I);
2101           continue;
2102         }
2103         if (!ArgVal->getType()->isPointerTy() ||
2104             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2105           continue;
2106         auto &NonNullAA =
2107             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2108         if (NonNullAA.isKnownNonNull())
2109           KnownUBInsts.insert(&I);
2110       }
2111       return true;
2112     };
2113 
2114     auto InspectReturnInstForUB =
2115         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2116           // Check if a return instruction always cause UB or not
2117           // Note: It is guaranteed that the returned position of the anchor
2118           //       scope has noundef attribute when this is called.
2119           //       We also ensure the return position is not "assumed dead"
2120           //       because the returned value was then potentially simplified to
2121           //       `undef` in AAReturnedValues without removing the `noundef`
2122           //       attribute yet.
2123 
2124           // When the returned position has noundef attriubte, UB occur in the
2125           // following cases.
2126           //   (1) Returned value is known to be undef.
2127           //   (2) The value is known to be a null pointer and the returned
2128           //       position has nonnull attribute (because the returned value is
2129           //       poison).
2130           bool FoundUB = false;
2131           if (isa<UndefValue>(V)) {
2132             FoundUB = true;
2133           } else {
2134             if (isa<ConstantPointerNull>(V)) {
2135               auto &NonNullAA = A.getAAFor<AANonNull>(
2136                   *this, IRPosition::returned(*getAnchorScope()),
2137                   DepClassTy::NONE);
2138               if (NonNullAA.isKnownNonNull())
2139                 FoundUB = true;
2140             }
2141           }
2142 
2143           if (FoundUB)
2144             for (ReturnInst *RI : RetInsts)
2145               KnownUBInsts.insert(RI);
2146           return true;
2147         };
2148 
2149     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2150                               {Instruction::Load, Instruction::Store,
2151                                Instruction::AtomicCmpXchg,
2152                                Instruction::AtomicRMW},
2153                               /* CheckBBLivenessOnly */ true);
2154     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2155                               /* CheckBBLivenessOnly */ true);
2156     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2157 
2158     // If the returned position of the anchor scope has noundef attriubte, check
2159     // all returned instructions.
2160     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2161       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2162       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2163         auto &RetPosNoUndefAA =
2164             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2165         if (RetPosNoUndefAA.isKnownNoUndef())
2166           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2167                                                     *this);
2168       }
2169     }
2170 
2171     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2172         UBPrevSize != KnownUBInsts.size())
2173       return ChangeStatus::CHANGED;
2174     return ChangeStatus::UNCHANGED;
2175   }
2176 
2177   bool isKnownToCauseUB(Instruction *I) const override {
2178     return KnownUBInsts.count(I);
2179   }
2180 
2181   bool isAssumedToCauseUB(Instruction *I) const override {
2182     // In simple words, if an instruction is not in the assumed to _not_
2183     // cause UB, then it is assumed UB (that includes those
2184     // in the KnownUBInsts set). The rest is boilerplate
2185     // is to ensure that it is one of the instructions we test
2186     // for UB.
2187 
2188     switch (I->getOpcode()) {
2189     case Instruction::Load:
2190     case Instruction::Store:
2191     case Instruction::AtomicCmpXchg:
2192     case Instruction::AtomicRMW:
2193       return !AssumedNoUBInsts.count(I);
2194     case Instruction::Br: {
2195       auto BrInst = cast<BranchInst>(I);
2196       if (BrInst->isUnconditional())
2197         return false;
2198       return !AssumedNoUBInsts.count(I);
2199     } break;
2200     default:
2201       return false;
2202     }
2203     return false;
2204   }
2205 
2206   ChangeStatus manifest(Attributor &A) override {
2207     if (KnownUBInsts.empty())
2208       return ChangeStatus::UNCHANGED;
2209     for (Instruction *I : KnownUBInsts)
2210       A.changeToUnreachableAfterManifest(I);
2211     return ChangeStatus::CHANGED;
2212   }
2213 
2214   /// See AbstractAttribute::getAsStr()
2215   const std::string getAsStr() const override {
2216     return getAssumed() ? "undefined-behavior" : "no-ub";
2217   }
2218 
2219   /// Note: The correctness of this analysis depends on the fact that the
2220   /// following 2 sets will stop changing after some point.
2221   /// "Change" here means that their size changes.
2222   /// The size of each set is monotonically increasing
2223   /// (we only add items to them) and it is upper bounded by the number of
2224   /// instructions in the processed function (we can never save more
2225   /// elements in either set than this number). Hence, at some point,
2226   /// they will stop increasing.
2227   /// Consequently, at some point, both sets will have stopped
2228   /// changing, effectively making the analysis reach a fixpoint.
2229 
2230   /// Note: These 2 sets are disjoint and an instruction can be considered
2231   /// one of 3 things:
2232   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2233   ///    the KnownUBInsts set.
2234   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2235   ///    has a reason to assume it).
2236   /// 3) Assumed to not cause UB. very other instruction - AAUndefinedBehavior
2237   ///    could not find a reason to assume or prove that it can cause UB,
2238   ///    hence it assumes it doesn't. We have a set for these instructions
2239   ///    so that we don't reprocess them in every update.
2240   ///    Note however that instructions in this set may cause UB.
2241 
2242 protected:
2243   /// A set of all live instructions _known_ to cause UB.
2244   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2245 
2246 private:
2247   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2248   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2249 
2250   // Should be called on updates in which if we're processing an instruction
2251   // \p I that depends on a value \p V, one of the following has to happen:
2252   // - If the value is assumed, then stop.
2253   // - If the value is known but undef, then consider it UB.
2254   // - Otherwise, do specific processing with the simplified value.
2255   // We return None in the first 2 cases to signify that an appropriate
2256   // action was taken and the caller should stop.
2257   // Otherwise, we return the simplified value that the caller should
2258   // use for specific processing.
2259   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2260                                          Instruction *I) {
2261     const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2262         *this, IRPosition::value(*V), DepClassTy::REQUIRED);
2263     Optional<Value *> SimplifiedV =
2264         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2265     if (!ValueSimplifyAA.isKnown()) {
2266       // Don't depend on assumed values.
2267       return llvm::None;
2268     }
2269     if (!SimplifiedV.hasValue()) {
2270       // If it is known (which we tested above) but it doesn't have a value,
2271       // then we can assume `undef` and hence the instruction is UB.
2272       KnownUBInsts.insert(I);
2273       return llvm::None;
2274     }
2275     Value *Val = SimplifiedV.getValue();
2276     if (isa<UndefValue>(Val)) {
2277       KnownUBInsts.insert(I);
2278       return llvm::None;
2279     }
2280     return Val;
2281   }
2282 };
2283 
2284 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2285   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2286       : AAUndefinedBehaviorImpl(IRP, A) {}
2287 
2288   /// See AbstractAttribute::trackStatistics()
2289   void trackStatistics() const override {
2290     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2291                "Number of instructions known to have UB");
2292     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2293         KnownUBInsts.size();
2294   }
2295 };
2296 
2297 /// ------------------------ Will-Return Attributes ----------------------------
2298 
2299 // Helper function that checks whether a function has any cycle which we don't
2300 // know if it is bounded or not.
2301 // Loops with maximum trip count are considered bounded, any other cycle not.
2302 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2303   ScalarEvolution *SE =
2304       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2305   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2306   // If either SCEV or LoopInfo is not available for the function then we assume
2307   // any cycle to be unbounded cycle.
2308   // We use scc_iterator which uses Tarjan algorithm to find all the maximal
2309   // SCCs.To detect if there's a cycle, we only need to find the maximal ones.
2310   if (!SE || !LI) {
2311     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2312       if (SCCI.hasCycle())
2313         return true;
2314     return false;
2315   }
2316 
2317   // If there's irreducible control, the function may contain non-loop cycles.
2318   if (mayContainIrreducibleControl(F, LI))
2319     return true;
2320 
2321   // Any loop that does not have a max trip count is considered unbounded cycle.
2322   for (auto *L : LI->getLoopsInPreorder()) {
2323     if (!SE->getSmallConstantMaxTripCount(L))
2324       return true;
2325   }
2326   return false;
2327 }
2328 
2329 struct AAWillReturnImpl : public AAWillReturn {
2330   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2331       : AAWillReturn(IRP, A) {}
2332 
2333   /// See AbstractAttribute::initialize(...).
2334   void initialize(Attributor &A) override {
2335     AAWillReturn::initialize(A);
2336 
2337     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2338       indicateOptimisticFixpoint();
2339       return;
2340     }
2341   }
2342 
2343   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2344   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2345     // Check for `mustprogress` in the scope and the associated function which
2346     // might be different if this is a call site.
2347     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2348         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2349       return false;
2350 
2351     const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2352                                                       DepClassTy::NONE);
2353     if (!MemAA.isAssumedReadOnly())
2354       return false;
2355     if (KnownOnly && !MemAA.isKnownReadOnly())
2356       return false;
2357     if (!MemAA.isKnownReadOnly())
2358       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2359 
2360     return true;
2361   }
2362 
2363   /// See AbstractAttribute::updateImpl(...).
2364   ChangeStatus updateImpl(Attributor &A) override {
2365     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2366       return ChangeStatus::UNCHANGED;
2367 
2368     auto CheckForWillReturn = [&](Instruction &I) {
2369       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2370       const auto &WillReturnAA =
2371           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2372       if (WillReturnAA.isKnownWillReturn())
2373         return true;
2374       if (!WillReturnAA.isAssumedWillReturn())
2375         return false;
2376       const auto &NoRecurseAA =
2377           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2378       return NoRecurseAA.isAssumedNoRecurse();
2379     };
2380 
2381     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2382       return indicatePessimisticFixpoint();
2383 
2384     return ChangeStatus::UNCHANGED;
2385   }
2386 
2387   /// See AbstractAttribute::getAsStr()
2388   const std::string getAsStr() const override {
2389     return getAssumed() ? "willreturn" : "may-noreturn";
2390   }
2391 };
2392 
2393 struct AAWillReturnFunction final : AAWillReturnImpl {
2394   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2395       : AAWillReturnImpl(IRP, A) {}
2396 
2397   /// See AbstractAttribute::initialize(...).
2398   void initialize(Attributor &A) override {
2399     AAWillReturnImpl::initialize(A);
2400 
2401     Function *F = getAnchorScope();
2402     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2403       indicatePessimisticFixpoint();
2404   }
2405 
2406   /// See AbstractAttribute::trackStatistics()
2407   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2408 };
2409 
2410 /// WillReturn attribute deduction for a call sites.
2411 struct AAWillReturnCallSite final : AAWillReturnImpl {
2412   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2413       : AAWillReturnImpl(IRP, A) {}
2414 
2415   /// See AbstractAttribute::initialize(...).
2416   void initialize(Attributor &A) override {
2417     AAWillReturnImpl::initialize(A);
2418     Function *F = getAssociatedFunction();
2419     if (!F || !A.isFunctionIPOAmendable(*F))
2420       indicatePessimisticFixpoint();
2421   }
2422 
2423   /// See AbstractAttribute::updateImpl(...).
2424   ChangeStatus updateImpl(Attributor &A) override {
2425     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2426       return ChangeStatus::UNCHANGED;
2427 
2428     // TODO: Once we have call site specific value information we can provide
2429     //       call site specific liveness information and then it makes
2430     //       sense to specialize attributes for call sites arguments instead of
2431     //       redirecting requests to the callee argument.
2432     Function *F = getAssociatedFunction();
2433     const IRPosition &FnPos = IRPosition::function(*F);
2434     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2435     return clampStateAndIndicateChange(getState(), FnAA.getState());
2436   }
2437 
2438   /// See AbstractAttribute::trackStatistics()
2439   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2440 };
2441 
2442 /// -------------------AAReachability Attribute--------------------------
2443 
2444 struct AAReachabilityImpl : AAReachability {
2445   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2446       : AAReachability(IRP, A) {}
2447 
2448   const std::string getAsStr() const override {
2449     // TODO: Return the number of reachable queries.
2450     return "reachable";
2451   }
2452 
2453   /// See AbstractAttribute::initialize(...).
2454   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2455 
2456   /// See AbstractAttribute::updateImpl(...).
2457   ChangeStatus updateImpl(Attributor &A) override {
2458     return indicatePessimisticFixpoint();
2459   }
2460 };
2461 
2462 struct AAReachabilityFunction final : public AAReachabilityImpl {
2463   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2464       : AAReachabilityImpl(IRP, A) {}
2465 
2466   /// See AbstractAttribute::trackStatistics()
2467   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2468 };
2469 
2470 /// ------------------------ NoAlias Argument Attribute ------------------------
2471 
2472 struct AANoAliasImpl : AANoAlias {
2473   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2474     assert(getAssociatedType()->isPointerTy() &&
2475            "Noalias is a pointer attribute");
2476   }
2477 
2478   const std::string getAsStr() const override {
2479     return getAssumed() ? "noalias" : "may-alias";
2480   }
2481 };
2482 
2483 /// NoAlias attribute for a floating value.
2484 struct AANoAliasFloating final : AANoAliasImpl {
2485   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2486       : AANoAliasImpl(IRP, A) {}
2487 
2488   /// See AbstractAttribute::initialize(...).
2489   void initialize(Attributor &A) override {
2490     AANoAliasImpl::initialize(A);
2491     Value *Val = &getAssociatedValue();
2492     do {
2493       CastInst *CI = dyn_cast<CastInst>(Val);
2494       if (!CI)
2495         break;
2496       Value *Base = CI->getOperand(0);
2497       if (!Base->hasOneUse())
2498         break;
2499       Val = Base;
2500     } while (true);
2501 
2502     if (!Val->getType()->isPointerTy()) {
2503       indicatePessimisticFixpoint();
2504       return;
2505     }
2506 
2507     if (isa<AllocaInst>(Val))
2508       indicateOptimisticFixpoint();
2509     else if (isa<ConstantPointerNull>(Val) &&
2510              !NullPointerIsDefined(getAnchorScope(),
2511                                    Val->getType()->getPointerAddressSpace()))
2512       indicateOptimisticFixpoint();
2513     else if (Val != &getAssociatedValue()) {
2514       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2515           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2516       if (ValNoAliasAA.isKnownNoAlias())
2517         indicateOptimisticFixpoint();
2518     }
2519   }
2520 
2521   /// See AbstractAttribute::updateImpl(...).
2522   ChangeStatus updateImpl(Attributor &A) override {
2523     // TODO: Implement this.
2524     return indicatePessimisticFixpoint();
2525   }
2526 
2527   /// See AbstractAttribute::trackStatistics()
2528   void trackStatistics() const override {
2529     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2530   }
2531 };
2532 
2533 /// NoAlias attribute for an argument.
2534 struct AANoAliasArgument final
2535     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2536   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2537   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2538 
2539   /// See AbstractAttribute::initialize(...).
2540   void initialize(Attributor &A) override {
2541     Base::initialize(A);
2542     // See callsite argument attribute and callee argument attribute.
2543     if (hasAttr({Attribute::ByVal}))
2544       indicateOptimisticFixpoint();
2545   }
2546 
2547   /// See AbstractAttribute::update(...).
2548   ChangeStatus updateImpl(Attributor &A) override {
2549     // We have to make sure no-alias on the argument does not break
2550     // synchronization when this is a callback argument, see also [1] below.
2551     // If synchronization cannot be affected, we delegate to the base updateImpl
2552     // function, otherwise we give up for now.
2553 
2554     // If the function is no-sync, no-alias cannot break synchronization.
2555     const auto &NoSyncAA =
2556         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2557                              DepClassTy::OPTIONAL);
2558     if (NoSyncAA.isAssumedNoSync())
2559       return Base::updateImpl(A);
2560 
2561     // If the argument is read-only, no-alias cannot break synchronization.
2562     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2563         *this, getIRPosition(), DepClassTy::OPTIONAL);
2564     if (MemBehaviorAA.isAssumedReadOnly())
2565       return Base::updateImpl(A);
2566 
2567     // If the argument is never passed through callbacks, no-alias cannot break
2568     // synchronization.
2569     bool AllCallSitesKnown;
2570     if (A.checkForAllCallSites(
2571             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2572             true, AllCallSitesKnown))
2573       return Base::updateImpl(A);
2574 
2575     // TODO: add no-alias but make sure it doesn't break synchronization by
2576     // introducing fake uses. See:
2577     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2578     //     International Workshop on OpenMP 2018,
2579     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2580 
2581     return indicatePessimisticFixpoint();
2582   }
2583 
2584   /// See AbstractAttribute::trackStatistics()
2585   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2586 };
2587 
2588 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2589   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2590       : AANoAliasImpl(IRP, A) {}
2591 
2592   /// See AbstractAttribute::initialize(...).
2593   void initialize(Attributor &A) override {
2594     // See callsite argument attribute and callee argument attribute.
2595     const auto &CB = cast<CallBase>(getAnchorValue());
2596     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2597       indicateOptimisticFixpoint();
2598     Value &Val = getAssociatedValue();
2599     if (isa<ConstantPointerNull>(Val) &&
2600         !NullPointerIsDefined(getAnchorScope(),
2601                               Val.getType()->getPointerAddressSpace()))
2602       indicateOptimisticFixpoint();
2603   }
2604 
2605   /// Determine if the underlying value may alias with the call site argument
2606   /// \p OtherArgNo of \p ICS (= the underlying call site).
2607   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2608                             const AAMemoryBehavior &MemBehaviorAA,
2609                             const CallBase &CB, unsigned OtherArgNo) {
2610     // We do not need to worry about aliasing with the underlying IRP.
2611     if (this->getCalleeArgNo() == (int)OtherArgNo)
2612       return false;
2613 
2614     // If it is not a pointer or pointer vector we do not alias.
2615     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2616     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2617       return false;
2618 
2619     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2620         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2621 
2622     // If the argument is readnone, there is no read-write aliasing.
2623     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2624       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2625       return false;
2626     }
2627 
2628     // If the argument is readonly and the underlying value is readonly, there
2629     // is no read-write aliasing.
2630     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2631     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2632       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2633       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2634       return false;
2635     }
2636 
2637     // We have to utilize actual alias analysis queries so we need the object.
2638     if (!AAR)
2639       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2640 
2641     // Try to rule it out at the call site.
2642     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2643     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2644                          "callsite arguments: "
2645                       << getAssociatedValue() << " " << *ArgOp << " => "
2646                       << (IsAliasing ? "" : "no-") << "alias \n");
2647 
2648     return IsAliasing;
2649   }
2650 
2651   bool
2652   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2653                                          const AAMemoryBehavior &MemBehaviorAA,
2654                                          const AANoAlias &NoAliasAA) {
2655     // We can deduce "noalias" if the following conditions hold.
2656     // (i)   Associated value is assumed to be noalias in the definition.
2657     // (ii)  Associated value is assumed to be no-capture in all the uses
2658     //       possibly executed before this callsite.
2659     // (iii) There is no other pointer argument which could alias with the
2660     //       value.
2661 
2662     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2663     if (!AssociatedValueIsNoAliasAtDef) {
2664       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2665                         << " is not no-alias at the definition\n");
2666       return false;
2667     }
2668 
2669     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2670 
2671     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2672     const Function *ScopeFn = VIRP.getAnchorScope();
2673     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
2674     // Check whether the value is captured in the scope using AANoCapture.
2675     //      Look at CFG and check only uses possibly executed before this
2676     //      callsite.
2677     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2678       Instruction *UserI = cast<Instruction>(U.getUser());
2679 
2680       // If UserI is the curr instruction and there is a single potential use of
2681       // the value in UserI we allow the use.
2682       // TODO: We should inspect the operands and allow those that cannot alias
2683       //       with the value.
2684       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2685         return true;
2686 
2687       if (ScopeFn) {
2688         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2689             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2690 
2691         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2692           return true;
2693 
2694         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2695           if (CB->isArgOperand(&U)) {
2696 
2697             unsigned ArgNo = CB->getArgOperandNo(&U);
2698 
2699             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2700                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2701                 DepClassTy::OPTIONAL);
2702 
2703             if (NoCaptureAA.isAssumedNoCapture())
2704               return true;
2705           }
2706         }
2707       }
2708 
2709       // For cases which can potentially have more users
2710       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2711           isa<SelectInst>(U)) {
2712         Follow = true;
2713         return true;
2714       }
2715 
2716       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2717       return false;
2718     };
2719 
2720     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2721       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2722         LLVM_DEBUG(
2723             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2724                    << " cannot be noalias as it is potentially captured\n");
2725         return false;
2726       }
2727     }
2728     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2729 
2730     // Check there is no other pointer argument which could alias with the
2731     // value passed at this call site.
2732     // TODO: AbstractCallSite
2733     const auto &CB = cast<CallBase>(getAnchorValue());
2734     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2735          OtherArgNo++)
2736       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2737         return false;
2738 
2739     return true;
2740   }
2741 
2742   /// See AbstractAttribute::updateImpl(...).
2743   ChangeStatus updateImpl(Attributor &A) override {
2744     // If the argument is readnone we are done as there are no accesses via the
2745     // argument.
2746     auto &MemBehaviorAA =
2747         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2748     if (MemBehaviorAA.isAssumedReadNone()) {
2749       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2750       return ChangeStatus::UNCHANGED;
2751     }
2752 
2753     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2754     const auto &NoAliasAA =
2755         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2756 
2757     AAResults *AAR = nullptr;
2758     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2759                                                NoAliasAA)) {
2760       LLVM_DEBUG(
2761           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2762       return ChangeStatus::UNCHANGED;
2763     }
2764 
2765     return indicatePessimisticFixpoint();
2766   }
2767 
2768   /// See AbstractAttribute::trackStatistics()
2769   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2770 };
2771 
2772 /// NoAlias attribute for function return value.
2773 struct AANoAliasReturned final : AANoAliasImpl {
2774   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2775       : AANoAliasImpl(IRP, A) {}
2776 
2777   /// See AbstractAttribute::initialize(...).
2778   void initialize(Attributor &A) override {
2779     AANoAliasImpl::initialize(A);
2780     Function *F = getAssociatedFunction();
2781     if (!F || F->isDeclaration())
2782       indicatePessimisticFixpoint();
2783   }
2784 
2785   /// See AbstractAttribute::updateImpl(...).
2786   virtual ChangeStatus updateImpl(Attributor &A) override {
2787 
2788     auto CheckReturnValue = [&](Value &RV) -> bool {
2789       if (Constant *C = dyn_cast<Constant>(&RV))
2790         if (C->isNullValue() || isa<UndefValue>(C))
2791           return true;
2792 
2793       /// For now, we can only deduce noalias if we have call sites.
2794       /// FIXME: add more support.
2795       if (!isa<CallBase>(&RV))
2796         return false;
2797 
2798       const IRPosition &RVPos = IRPosition::value(RV);
2799       const auto &NoAliasAA =
2800           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2801       if (!NoAliasAA.isAssumedNoAlias())
2802         return false;
2803 
2804       const auto &NoCaptureAA =
2805           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2806       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2807     };
2808 
2809     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2810       return indicatePessimisticFixpoint();
2811 
2812     return ChangeStatus::UNCHANGED;
2813   }
2814 
2815   /// See AbstractAttribute::trackStatistics()
2816   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2817 };
2818 
2819 /// NoAlias attribute deduction for a call site return value.
2820 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2821   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2822       : AANoAliasImpl(IRP, A) {}
2823 
2824   /// See AbstractAttribute::initialize(...).
2825   void initialize(Attributor &A) override {
2826     AANoAliasImpl::initialize(A);
2827     Function *F = getAssociatedFunction();
2828     if (!F || F->isDeclaration())
2829       indicatePessimisticFixpoint();
2830   }
2831 
2832   /// See AbstractAttribute::updateImpl(...).
2833   ChangeStatus updateImpl(Attributor &A) override {
2834     // TODO: Once we have call site specific value information we can provide
2835     //       call site specific liveness information and then it makes
2836     //       sense to specialize attributes for call sites arguments instead of
2837     //       redirecting requests to the callee argument.
2838     Function *F = getAssociatedFunction();
2839     const IRPosition &FnPos = IRPosition::returned(*F);
2840     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2841     return clampStateAndIndicateChange(getState(), FnAA.getState());
2842   }
2843 
2844   /// See AbstractAttribute::trackStatistics()
2845   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2846 };
2847 
2848 /// -------------------AAIsDead Function Attribute-----------------------
2849 
2850 struct AAIsDeadValueImpl : public AAIsDead {
2851   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2852 
2853   /// See AAIsDead::isAssumedDead().
2854   bool isAssumedDead() const override { return getAssumed(); }
2855 
2856   /// See AAIsDead::isKnownDead().
2857   bool isKnownDead() const override { return getKnown(); }
2858 
2859   /// See AAIsDead::isAssumedDead(BasicBlock *).
2860   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2861 
2862   /// See AAIsDead::isKnownDead(BasicBlock *).
2863   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2864 
2865   /// See AAIsDead::isAssumedDead(Instruction *I).
2866   bool isAssumedDead(const Instruction *I) const override {
2867     return I == getCtxI() && isAssumedDead();
2868   }
2869 
2870   /// See AAIsDead::isKnownDead(Instruction *I).
2871   bool isKnownDead(const Instruction *I) const override {
2872     return isAssumedDead(I) && getKnown();
2873   }
2874 
2875   /// See AbstractAttribute::getAsStr().
2876   const std::string getAsStr() const override {
2877     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2878   }
2879 
2880   /// Check if all uses are assumed dead.
2881   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2882     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2883     // Explicitly set the dependence class to required because we want a long
2884     // chain of N dependent instructions to be considered live as soon as one is
2885     // without going through N update cycles. This is not required for
2886     // correctness.
2887     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2888   }
2889 
2890   /// Determine if \p I is assumed to be side-effect free.
2891   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2892     if (!I || wouldInstructionBeTriviallyDead(I))
2893       return true;
2894 
2895     auto *CB = dyn_cast<CallBase>(I);
2896     if (!CB || isa<IntrinsicInst>(CB))
2897       return false;
2898 
2899     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2900     const auto &NoUnwindAA =
2901         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2902     if (!NoUnwindAA.isAssumedNoUnwind())
2903       return false;
2904     if (!NoUnwindAA.isKnownNoUnwind())
2905       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2906 
2907     const auto &MemBehaviorAA =
2908         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2909     if (MemBehaviorAA.isAssumedReadOnly()) {
2910       if (!MemBehaviorAA.isKnownReadOnly())
2911         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2912       return true;
2913     }
2914     return false;
2915   }
2916 };
2917 
2918 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2919   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2920       : AAIsDeadValueImpl(IRP, A) {}
2921 
2922   /// See AbstractAttribute::initialize(...).
2923   void initialize(Attributor &A) override {
2924     if (isa<UndefValue>(getAssociatedValue())) {
2925       indicatePessimisticFixpoint();
2926       return;
2927     }
2928 
2929     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2930     if (!isAssumedSideEffectFree(A, I))
2931       indicatePessimisticFixpoint();
2932   }
2933 
2934   /// See AbstractAttribute::updateImpl(...).
2935   ChangeStatus updateImpl(Attributor &A) override {
2936     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2937     if (!isAssumedSideEffectFree(A, I))
2938       return indicatePessimisticFixpoint();
2939 
2940     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2941       return indicatePessimisticFixpoint();
2942     return ChangeStatus::UNCHANGED;
2943   }
2944 
2945   /// See AbstractAttribute::manifest(...).
2946   ChangeStatus manifest(Attributor &A) override {
2947     Value &V = getAssociatedValue();
2948     if (auto *I = dyn_cast<Instruction>(&V)) {
2949       // If we get here we basically know the users are all dead. We check if
2950       // isAssumedSideEffectFree returns true here again because it might not be
2951       // the case and only the users are dead but the instruction (=call) is
2952       // still needed.
2953       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2954         A.deleteAfterManifest(*I);
2955         return ChangeStatus::CHANGED;
2956       }
2957     }
2958     if (V.use_empty())
2959       return ChangeStatus::UNCHANGED;
2960 
2961     bool UsedAssumedInformation = false;
2962     Optional<Constant *> C =
2963         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2964     if (C.hasValue() && C.getValue())
2965       return ChangeStatus::UNCHANGED;
2966 
2967     // Replace the value with undef as it is dead but keep droppable uses around
2968     // as they provide information we don't want to give up on just yet.
2969     UndefValue &UV = *UndefValue::get(V.getType());
2970     bool AnyChange =
2971         A.changeValueAfterManifest(V, UV, /* ChangeDropppable */ false);
2972     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2973   }
2974 
2975   /// See AbstractAttribute::trackStatistics()
2976   void trackStatistics() const override {
2977     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2978   }
2979 };
2980 
2981 struct AAIsDeadArgument : public AAIsDeadFloating {
2982   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2983       : AAIsDeadFloating(IRP, A) {}
2984 
2985   /// See AbstractAttribute::initialize(...).
2986   void initialize(Attributor &A) override {
2987     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2988       indicatePessimisticFixpoint();
2989   }
2990 
2991   /// See AbstractAttribute::manifest(...).
2992   ChangeStatus manifest(Attributor &A) override {
2993     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2994     Argument &Arg = *getAssociatedArgument();
2995     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2996       if (A.registerFunctionSignatureRewrite(
2997               Arg, /* ReplacementTypes */ {},
2998               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2999               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
3000         Arg.dropDroppableUses();
3001         return ChangeStatus::CHANGED;
3002       }
3003     return Changed;
3004   }
3005 
3006   /// See AbstractAttribute::trackStatistics()
3007   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
3008 };
3009 
3010 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
3011   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
3012       : AAIsDeadValueImpl(IRP, A) {}
3013 
3014   /// See AbstractAttribute::initialize(...).
3015   void initialize(Attributor &A) override {
3016     if (isa<UndefValue>(getAssociatedValue()))
3017       indicatePessimisticFixpoint();
3018   }
3019 
3020   /// See AbstractAttribute::updateImpl(...).
3021   ChangeStatus updateImpl(Attributor &A) override {
3022     // TODO: Once we have call site specific value information we can provide
3023     //       call site specific liveness information and then it makes
3024     //       sense to specialize attributes for call sites arguments instead of
3025     //       redirecting requests to the callee argument.
3026     Argument *Arg = getAssociatedArgument();
3027     if (!Arg)
3028       return indicatePessimisticFixpoint();
3029     const IRPosition &ArgPos = IRPosition::argument(*Arg);
3030     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
3031     return clampStateAndIndicateChange(getState(), ArgAA.getState());
3032   }
3033 
3034   /// See AbstractAttribute::manifest(...).
3035   ChangeStatus manifest(Attributor &A) override {
3036     CallBase &CB = cast<CallBase>(getAnchorValue());
3037     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
3038     assert(!isa<UndefValue>(U.get()) &&
3039            "Expected undef values to be filtered out!");
3040     UndefValue &UV = *UndefValue::get(U->getType());
3041     if (A.changeUseAfterManifest(U, UV))
3042       return ChangeStatus::CHANGED;
3043     return ChangeStatus::UNCHANGED;
3044   }
3045 
3046   /// See AbstractAttribute::trackStatistics()
3047   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
3048 };
3049 
3050 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
3051   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
3052       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
3053 
3054   /// See AAIsDead::isAssumedDead().
3055   bool isAssumedDead() const override {
3056     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3057   }
3058 
3059   /// See AbstractAttribute::initialize(...).
3060   void initialize(Attributor &A) override {
3061     if (isa<UndefValue>(getAssociatedValue())) {
3062       indicatePessimisticFixpoint();
3063       return;
3064     }
3065 
3066     // We track this separately as a secondary state.
3067     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3068   }
3069 
3070   /// See AbstractAttribute::updateImpl(...).
3071   ChangeStatus updateImpl(Attributor &A) override {
3072     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3073     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3074       IsAssumedSideEffectFree = false;
3075       Changed = ChangeStatus::CHANGED;
3076     }
3077 
3078     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3079       return indicatePessimisticFixpoint();
3080     return Changed;
3081   }
3082 
3083   /// See AbstractAttribute::trackStatistics()
3084   void trackStatistics() const override {
3085     if (IsAssumedSideEffectFree)
3086       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3087     else
3088       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3089   }
3090 
3091   /// See AbstractAttribute::getAsStr().
3092   const std::string getAsStr() const override {
3093     return isAssumedDead()
3094                ? "assumed-dead"
3095                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3096   }
3097 
3098 private:
3099   bool IsAssumedSideEffectFree;
3100 };
3101 
3102 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3103   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3104       : AAIsDeadValueImpl(IRP, A) {}
3105 
3106   /// See AbstractAttribute::updateImpl(...).
3107   ChangeStatus updateImpl(Attributor &A) override {
3108 
3109     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3110                               {Instruction::Ret});
3111 
3112     auto PredForCallSite = [&](AbstractCallSite ACS) {
3113       if (ACS.isCallbackCall() || !ACS.getInstruction())
3114         return false;
3115       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3116     };
3117 
3118     bool AllCallSitesKnown;
3119     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3120                                 AllCallSitesKnown))
3121       return indicatePessimisticFixpoint();
3122 
3123     return ChangeStatus::UNCHANGED;
3124   }
3125 
3126   /// See AbstractAttribute::manifest(...).
3127   ChangeStatus manifest(Attributor &A) override {
3128     // TODO: Rewrite the signature to return void?
3129     bool AnyChange = false;
3130     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3131     auto RetInstPred = [&](Instruction &I) {
3132       ReturnInst &RI = cast<ReturnInst>(I);
3133       if (!isa<UndefValue>(RI.getReturnValue()))
3134         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3135       return true;
3136     };
3137     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3138     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3139   }
3140 
3141   /// See AbstractAttribute::trackStatistics()
3142   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3143 };
3144 
3145 struct AAIsDeadFunction : public AAIsDead {
3146   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3147 
3148   /// See AbstractAttribute::initialize(...).
3149   void initialize(Attributor &A) override {
3150     const Function *F = getAnchorScope();
3151     if (F && !F->isDeclaration()) {
3152       // We only want to compute liveness once. If the function is not part of
3153       // the SCC, skip it.
3154       if (A.isRunOn(*const_cast<Function *>(F))) {
3155         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3156         assumeLive(A, F->getEntryBlock());
3157       } else {
3158         indicatePessimisticFixpoint();
3159       }
3160     }
3161   }
3162 
3163   /// See AbstractAttribute::getAsStr().
3164   const std::string getAsStr() const override {
3165     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3166            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3167            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3168            std::to_string(KnownDeadEnds.size()) + "]";
3169   }
3170 
3171   /// See AbstractAttribute::manifest(...).
3172   ChangeStatus manifest(Attributor &A) override {
3173     assert(getState().isValidState() &&
3174            "Attempted to manifest an invalid state!");
3175 
3176     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3177     Function &F = *getAnchorScope();
3178 
3179     if (AssumedLiveBlocks.empty()) {
3180       A.deleteAfterManifest(F);
3181       return ChangeStatus::CHANGED;
3182     }
3183 
3184     // Flag to determine if we can change an invoke to a call assuming the
3185     // callee is nounwind. This is not possible if the personality of the
3186     // function allows to catch asynchronous exceptions.
3187     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3188 
3189     KnownDeadEnds.set_union(ToBeExploredFrom);
3190     for (const Instruction *DeadEndI : KnownDeadEnds) {
3191       auto *CB = dyn_cast<CallBase>(DeadEndI);
3192       if (!CB)
3193         continue;
3194       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3195           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3196       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3197       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3198         continue;
3199 
3200       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3201         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3202       else
3203         A.changeToUnreachableAfterManifest(
3204             const_cast<Instruction *>(DeadEndI->getNextNode()));
3205       HasChanged = ChangeStatus::CHANGED;
3206     }
3207 
3208     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3209     for (BasicBlock &BB : F)
3210       if (!AssumedLiveBlocks.count(&BB)) {
3211         A.deleteAfterManifest(BB);
3212         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3213       }
3214 
3215     return HasChanged;
3216   }
3217 
3218   /// See AbstractAttribute::updateImpl(...).
3219   ChangeStatus updateImpl(Attributor &A) override;
3220 
3221   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3222     return !AssumedLiveEdges.count(std::make_pair(From, To));
3223   }
3224 
3225   /// See AbstractAttribute::trackStatistics()
3226   void trackStatistics() const override {}
3227 
3228   /// Returns true if the function is assumed dead.
3229   bool isAssumedDead() const override { return false; }
3230 
3231   /// See AAIsDead::isKnownDead().
3232   bool isKnownDead() const override { return false; }
3233 
3234   /// See AAIsDead::isAssumedDead(BasicBlock *).
3235   bool isAssumedDead(const BasicBlock *BB) const override {
3236     assert(BB->getParent() == getAnchorScope() &&
3237            "BB must be in the same anchor scope function.");
3238 
3239     if (!getAssumed())
3240       return false;
3241     return !AssumedLiveBlocks.count(BB);
3242   }
3243 
3244   /// See AAIsDead::isKnownDead(BasicBlock *).
3245   bool isKnownDead(const BasicBlock *BB) const override {
3246     return getKnown() && isAssumedDead(BB);
3247   }
3248 
3249   /// See AAIsDead::isAssumed(Instruction *I).
3250   bool isAssumedDead(const Instruction *I) const override {
3251     assert(I->getParent()->getParent() == getAnchorScope() &&
3252            "Instruction must be in the same anchor scope function.");
3253 
3254     if (!getAssumed())
3255       return false;
3256 
3257     // If it is not in AssumedLiveBlocks then it for sure dead.
3258     // Otherwise, it can still be after noreturn call in a live block.
3259     if (!AssumedLiveBlocks.count(I->getParent()))
3260       return true;
3261 
3262     // If it is not after a liveness barrier it is live.
3263     const Instruction *PrevI = I->getPrevNode();
3264     while (PrevI) {
3265       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3266         return true;
3267       PrevI = PrevI->getPrevNode();
3268     }
3269     return false;
3270   }
3271 
3272   /// See AAIsDead::isKnownDead(Instruction *I).
3273   bool isKnownDead(const Instruction *I) const override {
3274     return getKnown() && isAssumedDead(I);
3275   }
3276 
3277   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3278   /// that internal function called from \p BB should now be looked at.
3279   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3280     if (!AssumedLiveBlocks.insert(&BB).second)
3281       return false;
3282 
3283     // We assume that all of BB is (probably) live now and if there are calls to
3284     // internal functions we will assume that those are now live as well. This
3285     // is a performance optimization for blocks with calls to a lot of internal
3286     // functions. It can however cause dead functions to be treated as live.
3287     for (const Instruction &I : BB)
3288       if (const auto *CB = dyn_cast<CallBase>(&I))
3289         if (const Function *F = CB->getCalledFunction())
3290           if (F->hasLocalLinkage())
3291             A.markLiveInternalFunction(*F);
3292     return true;
3293   }
3294 
3295   /// Collection of instructions that need to be explored again, e.g., we
3296   /// did assume they do not transfer control to (one of their) successors.
3297   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3298 
3299   /// Collection of instructions that are known to not transfer control.
3300   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3301 
3302   /// Collection of all assumed live edges
3303   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3304 
3305   /// Collection of all assumed live BasicBlocks.
3306   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3307 };
3308 
3309 static bool
3310 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3311                         AbstractAttribute &AA,
3312                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3313   const IRPosition &IPos = IRPosition::callsite_function(CB);
3314 
3315   const auto &NoReturnAA =
3316       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3317   if (NoReturnAA.isAssumedNoReturn())
3318     return !NoReturnAA.isKnownNoReturn();
3319   if (CB.isTerminator())
3320     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3321   else
3322     AliveSuccessors.push_back(CB.getNextNode());
3323   return false;
3324 }
3325 
3326 static bool
3327 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3328                         AbstractAttribute &AA,
3329                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3330   bool UsedAssumedInformation =
3331       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3332 
3333   // First, determine if we can change an invoke to a call assuming the
3334   // callee is nounwind. This is not possible if the personality of the
3335   // function allows to catch asynchronous exceptions.
3336   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3337     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3338   } else {
3339     const IRPosition &IPos = IRPosition::callsite_function(II);
3340     const auto &AANoUnw =
3341         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3342     if (AANoUnw.isAssumedNoUnwind()) {
3343       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3344     } else {
3345       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3346     }
3347   }
3348   return UsedAssumedInformation;
3349 }
3350 
3351 static bool
3352 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3353                         AbstractAttribute &AA,
3354                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3355   bool UsedAssumedInformation = false;
3356   if (BI.getNumSuccessors() == 1) {
3357     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3358   } else {
3359     Optional<ConstantInt *> CI = getAssumedConstantInt(
3360         A, *BI.getCondition(), AA, UsedAssumedInformation);
3361     if (!CI.hasValue()) {
3362       // No value yet, assume both edges are dead.
3363     } else if (CI.getValue()) {
3364       const BasicBlock *SuccBB =
3365           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3366       AliveSuccessors.push_back(&SuccBB->front());
3367     } else {
3368       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3369       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3370       UsedAssumedInformation = false;
3371     }
3372   }
3373   return UsedAssumedInformation;
3374 }
3375 
3376 static bool
3377 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3378                         AbstractAttribute &AA,
3379                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3380   bool UsedAssumedInformation = false;
3381   Optional<ConstantInt *> CI =
3382       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3383   if (!CI.hasValue()) {
3384     // No value yet, assume all edges are dead.
3385   } else if (CI.getValue()) {
3386     for (auto &CaseIt : SI.cases()) {
3387       if (CaseIt.getCaseValue() == CI.getValue()) {
3388         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3389         return UsedAssumedInformation;
3390       }
3391     }
3392     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3393     return UsedAssumedInformation;
3394   } else {
3395     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3396       AliveSuccessors.push_back(&SuccBB->front());
3397   }
3398   return UsedAssumedInformation;
3399 }
3400 
3401 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3402   ChangeStatus Change = ChangeStatus::UNCHANGED;
3403 
3404   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3405                     << getAnchorScope()->size() << "] BBs and "
3406                     << ToBeExploredFrom.size() << " exploration points and "
3407                     << KnownDeadEnds.size() << " known dead ends\n");
3408 
3409   // Copy and clear the list of instructions we need to explore from. It is
3410   // refilled with instructions the next update has to look at.
3411   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3412                                                ToBeExploredFrom.end());
3413   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3414 
3415   SmallVector<const Instruction *, 8> AliveSuccessors;
3416   while (!Worklist.empty()) {
3417     const Instruction *I = Worklist.pop_back_val();
3418     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3419 
3420     // Fast forward for uninteresting instructions. We could look for UB here
3421     // though.
3422     while (!I->isTerminator() && !isa<CallBase>(I)) {
3423       Change = ChangeStatus::CHANGED;
3424       I = I->getNextNode();
3425     }
3426 
3427     AliveSuccessors.clear();
3428 
3429     bool UsedAssumedInformation = false;
3430     switch (I->getOpcode()) {
3431     // TODO: look for (assumed) UB to backwards propagate "deadness".
3432     default:
3433       assert(I->isTerminator() &&
3434              "Expected non-terminators to be handled already!");
3435       for (const BasicBlock *SuccBB : successors(I->getParent()))
3436         AliveSuccessors.push_back(&SuccBB->front());
3437       break;
3438     case Instruction::Call:
3439       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3440                                                        *this, AliveSuccessors);
3441       break;
3442     case Instruction::Invoke:
3443       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3444                                                        *this, AliveSuccessors);
3445       break;
3446     case Instruction::Br:
3447       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3448                                                        *this, AliveSuccessors);
3449       break;
3450     case Instruction::Switch:
3451       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3452                                                        *this, AliveSuccessors);
3453       break;
3454     }
3455 
3456     if (UsedAssumedInformation) {
3457       NewToBeExploredFrom.insert(I);
3458     } else {
3459       Change = ChangeStatus::CHANGED;
3460       if (AliveSuccessors.empty() ||
3461           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3462         KnownDeadEnds.insert(I);
3463     }
3464 
3465     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3466                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3467                       << UsedAssumedInformation << "\n");
3468 
3469     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3470       if (!I->isTerminator()) {
3471         assert(AliveSuccessors.size() == 1 &&
3472                "Non-terminator expected to have a single successor!");
3473         Worklist.push_back(AliveSuccessor);
3474       } else {
3475         // record the assumed live edge
3476         AssumedLiveEdges.insert(
3477             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3478         if (assumeLive(A, *AliveSuccessor->getParent()))
3479           Worklist.push_back(AliveSuccessor);
3480       }
3481     }
3482   }
3483 
3484   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3485 
3486   // If we know everything is live there is no need to query for liveness.
3487   // Instead, indicating a pessimistic fixpoint will cause the state to be
3488   // "invalid" and all queries to be answered conservatively without lookups.
3489   // To be in this state we have to (1) finished the exploration and (3) not
3490   // discovered any non-trivial dead end and (2) not ruled unreachable code
3491   // dead.
3492   if (ToBeExploredFrom.empty() &&
3493       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3494       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3495         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3496       }))
3497     return indicatePessimisticFixpoint();
3498   return Change;
3499 }
3500 
3501 /// Liveness information for a call sites.
3502 struct AAIsDeadCallSite final : AAIsDeadFunction {
3503   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3504       : AAIsDeadFunction(IRP, A) {}
3505 
3506   /// See AbstractAttribute::initialize(...).
3507   void initialize(Attributor &A) override {
3508     // TODO: Once we have call site specific value information we can provide
3509     //       call site specific liveness information and then it makes
3510     //       sense to specialize attributes for call sites instead of
3511     //       redirecting requests to the callee.
3512     llvm_unreachable("Abstract attributes for liveness are not "
3513                      "supported for call sites yet!");
3514   }
3515 
3516   /// See AbstractAttribute::updateImpl(...).
3517   ChangeStatus updateImpl(Attributor &A) override {
3518     return indicatePessimisticFixpoint();
3519   }
3520 
3521   /// See AbstractAttribute::trackStatistics()
3522   void trackStatistics() const override {}
3523 };
3524 
3525 /// -------------------- Dereferenceable Argument Attribute --------------------
3526 
3527 template <>
3528 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3529                                                      const DerefState &R) {
3530   ChangeStatus CS0 =
3531       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3532   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3533   return CS0 | CS1;
3534 }
3535 
3536 struct AADereferenceableImpl : AADereferenceable {
3537   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3538       : AADereferenceable(IRP, A) {}
3539   using StateType = DerefState;
3540 
3541   /// See AbstractAttribute::initialize(...).
3542   void initialize(Attributor &A) override {
3543     SmallVector<Attribute, 4> Attrs;
3544     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3545              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3546     for (const Attribute &Attr : Attrs)
3547       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3548 
3549     const IRPosition &IRP = this->getIRPosition();
3550     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3551 
3552     bool CanBeNull, CanBeFreed;
3553     takeKnownDerefBytesMaximum(
3554         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3555             A.getDataLayout(), CanBeNull, CanBeFreed));
3556 
3557     bool IsFnInterface = IRP.isFnInterfaceKind();
3558     Function *FnScope = IRP.getAnchorScope();
3559     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3560       indicatePessimisticFixpoint();
3561       return;
3562     }
3563 
3564     if (Instruction *CtxI = getCtxI())
3565       followUsesInMBEC(*this, A, getState(), *CtxI);
3566   }
3567 
3568   /// See AbstractAttribute::getState()
3569   /// {
3570   StateType &getState() override { return *this; }
3571   const StateType &getState() const override { return *this; }
3572   /// }
3573 
3574   /// Helper function for collecting accessed bytes in must-be-executed-context
3575   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3576                               DerefState &State) {
3577     const Value *UseV = U->get();
3578     if (!UseV->getType()->isPointerTy())
3579       return;
3580 
3581     Type *PtrTy = UseV->getType();
3582     const DataLayout &DL = A.getDataLayout();
3583     int64_t Offset;
3584     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3585             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3586       if (Base == &getAssociatedValue() &&
3587           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3588         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3589         State.addAccessedBytes(Offset, Size);
3590       }
3591     }
3592   }
3593 
3594   /// See followUsesInMBEC
3595   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3596                        AADereferenceable::StateType &State) {
3597     bool IsNonNull = false;
3598     bool TrackUse = false;
3599     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3600         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3601     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3602                       << " for instruction " << *I << "\n");
3603 
3604     addAccessedBytesForUse(A, U, I, State);
3605     State.takeKnownDerefBytesMaximum(DerefBytes);
3606     return TrackUse;
3607   }
3608 
3609   /// See AbstractAttribute::manifest(...).
3610   ChangeStatus manifest(Attributor &A) override {
3611     ChangeStatus Change = AADereferenceable::manifest(A);
3612     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3613       removeAttrs({Attribute::DereferenceableOrNull});
3614       return ChangeStatus::CHANGED;
3615     }
3616     return Change;
3617   }
3618 
3619   void getDeducedAttributes(LLVMContext &Ctx,
3620                             SmallVectorImpl<Attribute> &Attrs) const override {
3621     // TODO: Add *_globally support
3622     if (isAssumedNonNull())
3623       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3624           Ctx, getAssumedDereferenceableBytes()));
3625     else
3626       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3627           Ctx, getAssumedDereferenceableBytes()));
3628   }
3629 
3630   /// See AbstractAttribute::getAsStr().
3631   const std::string getAsStr() const override {
3632     if (!getAssumedDereferenceableBytes())
3633       return "unknown-dereferenceable";
3634     return std::string("dereferenceable") +
3635            (isAssumedNonNull() ? "" : "_or_null") +
3636            (isAssumedGlobal() ? "_globally" : "") + "<" +
3637            std::to_string(getKnownDereferenceableBytes()) + "-" +
3638            std::to_string(getAssumedDereferenceableBytes()) + ">";
3639   }
3640 };
3641 
3642 /// Dereferenceable attribute for a floating value.
3643 struct AADereferenceableFloating : AADereferenceableImpl {
3644   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3645       : AADereferenceableImpl(IRP, A) {}
3646 
3647   /// See AbstractAttribute::updateImpl(...).
3648   ChangeStatus updateImpl(Attributor &A) override {
3649     const DataLayout &DL = A.getDataLayout();
3650 
3651     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3652                             bool Stripped) -> bool {
3653       unsigned IdxWidth =
3654           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3655       APInt Offset(IdxWidth, 0);
3656       const Value *Base =
3657           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3658 
3659       const auto &AA = A.getAAFor<AADereferenceable>(
3660           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3661       int64_t DerefBytes = 0;
3662       if (!Stripped && this == &AA) {
3663         // Use IR information if we did not strip anything.
3664         // TODO: track globally.
3665         bool CanBeNull, CanBeFreed;
3666         DerefBytes =
3667           Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
3668         T.GlobalState.indicatePessimisticFixpoint();
3669       } else {
3670         const DerefState &DS = AA.getState();
3671         DerefBytes = DS.DerefBytesState.getAssumed();
3672         T.GlobalState &= DS.GlobalState;
3673       }
3674 
3675       // For now we do not try to "increase" dereferenceability due to negative
3676       // indices as we first have to come up with code to deal with loops and
3677       // for overflows of the dereferenceable bytes.
3678       int64_t OffsetSExt = Offset.getSExtValue();
3679       if (OffsetSExt < 0)
3680         OffsetSExt = 0;
3681 
3682       T.takeAssumedDerefBytesMinimum(
3683           std::max(int64_t(0), DerefBytes - OffsetSExt));
3684 
3685       if (this == &AA) {
3686         if (!Stripped) {
3687           // If nothing was stripped IR information is all we got.
3688           T.takeKnownDerefBytesMaximum(
3689               std::max(int64_t(0), DerefBytes - OffsetSExt));
3690           T.indicatePessimisticFixpoint();
3691         } else if (OffsetSExt > 0) {
3692           // If something was stripped but there is circular reasoning we look
3693           // for the offset. If it is positive we basically decrease the
3694           // dereferenceable bytes in a circluar loop now, which will simply
3695           // drive them down to the known value in a very slow way which we
3696           // can accelerate.
3697           T.indicatePessimisticFixpoint();
3698         }
3699       }
3700 
3701       return T.isValidState();
3702     };
3703 
3704     DerefState T;
3705     if (!genericValueTraversal<AADereferenceable, DerefState>(
3706             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3707       return indicatePessimisticFixpoint();
3708 
3709     return clampStateAndIndicateChange(getState(), T);
3710   }
3711 
3712   /// See AbstractAttribute::trackStatistics()
3713   void trackStatistics() const override {
3714     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3715   }
3716 };
3717 
3718 /// Dereferenceable attribute for a return value.
3719 struct AADereferenceableReturned final
3720     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3721   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3722       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3723             IRP, A) {}
3724 
3725   /// See AbstractAttribute::trackStatistics()
3726   void trackStatistics() const override {
3727     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3728   }
3729 };
3730 
3731 /// Dereferenceable attribute for an argument
3732 struct AADereferenceableArgument final
3733     : AAArgumentFromCallSiteArguments<AADereferenceable,
3734                                       AADereferenceableImpl> {
3735   using Base =
3736       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3737   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3738       : Base(IRP, A) {}
3739 
3740   /// See AbstractAttribute::trackStatistics()
3741   void trackStatistics() const override {
3742     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3743   }
3744 };
3745 
3746 /// Dereferenceable attribute for a call site argument.
3747 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3748   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3749       : AADereferenceableFloating(IRP, A) {}
3750 
3751   /// See AbstractAttribute::trackStatistics()
3752   void trackStatistics() const override {
3753     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3754   }
3755 };
3756 
3757 /// Dereferenceable attribute deduction for a call site return value.
3758 struct AADereferenceableCallSiteReturned final
3759     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3760   using Base =
3761       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3762   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3763       : Base(IRP, A) {}
3764 
3765   /// See AbstractAttribute::trackStatistics()
3766   void trackStatistics() const override {
3767     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3768   }
3769 };
3770 
3771 // ------------------------ Align Argument Attribute ------------------------
3772 
3773 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3774                                     Value &AssociatedValue, const Use *U,
3775                                     const Instruction *I, bool &TrackUse) {
3776   // We need to follow common pointer manipulation uses to the accesses they
3777   // feed into.
3778   if (isa<CastInst>(I)) {
3779     // Follow all but ptr2int casts.
3780     TrackUse = !isa<PtrToIntInst>(I);
3781     return 0;
3782   }
3783   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3784     if (GEP->hasAllConstantIndices())
3785       TrackUse = true;
3786     return 0;
3787   }
3788 
3789   MaybeAlign MA;
3790   if (const auto *CB = dyn_cast<CallBase>(I)) {
3791     if (CB->isBundleOperand(U) || CB->isCallee(U))
3792       return 0;
3793 
3794     unsigned ArgNo = CB->getArgOperandNo(U);
3795     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3796     // As long as we only use known information there is no need to track
3797     // dependences here.
3798     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3799     MA = MaybeAlign(AlignAA.getKnownAlign());
3800   }
3801 
3802   const DataLayout &DL = A.getDataLayout();
3803   const Value *UseV = U->get();
3804   if (auto *SI = dyn_cast<StoreInst>(I)) {
3805     if (SI->getPointerOperand() == UseV)
3806       MA = SI->getAlign();
3807   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3808     if (LI->getPointerOperand() == UseV)
3809       MA = LI->getAlign();
3810   }
3811 
3812   if (!MA || *MA <= QueryingAA.getKnownAlign())
3813     return 0;
3814 
3815   unsigned Alignment = MA->value();
3816   int64_t Offset;
3817 
3818   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3819     if (Base == &AssociatedValue) {
3820       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3821       // So we can say that the maximum power of two which is a divisor of
3822       // gcd(Offset, Alignment) is an alignment.
3823 
3824       uint32_t gcd =
3825           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3826       Alignment = llvm::PowerOf2Floor(gcd);
3827     }
3828   }
3829 
3830   return Alignment;
3831 }
3832 
3833 struct AAAlignImpl : AAAlign {
3834   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3835 
3836   /// See AbstractAttribute::initialize(...).
3837   void initialize(Attributor &A) override {
3838     SmallVector<Attribute, 4> Attrs;
3839     getAttrs({Attribute::Alignment}, Attrs);
3840     for (const Attribute &Attr : Attrs)
3841       takeKnownMaximum(Attr.getValueAsInt());
3842 
3843     Value &V = getAssociatedValue();
3844     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3845     //       use of the function pointer. This was caused by D73131. We want to
3846     //       avoid this for function pointers especially because we iterate
3847     //       their uses and int2ptr is not handled. It is not a correctness
3848     //       problem though!
3849     if (!V.getType()->getPointerElementType()->isFunctionTy())
3850       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3851 
3852     if (getIRPosition().isFnInterfaceKind() &&
3853         (!getAnchorScope() ||
3854          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3855       indicatePessimisticFixpoint();
3856       return;
3857     }
3858 
3859     if (Instruction *CtxI = getCtxI())
3860       followUsesInMBEC(*this, A, getState(), *CtxI);
3861   }
3862 
3863   /// See AbstractAttribute::manifest(...).
3864   ChangeStatus manifest(Attributor &A) override {
3865     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3866 
3867     // Check for users that allow alignment annotations.
3868     Value &AssociatedValue = getAssociatedValue();
3869     for (const Use &U : AssociatedValue.uses()) {
3870       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3871         if (SI->getPointerOperand() == &AssociatedValue)
3872           if (SI->getAlignment() < getAssumedAlign()) {
3873             STATS_DECLTRACK(AAAlign, Store,
3874                             "Number of times alignment added to a store");
3875             SI->setAlignment(Align(getAssumedAlign()));
3876             LoadStoreChanged = ChangeStatus::CHANGED;
3877           }
3878       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3879         if (LI->getPointerOperand() == &AssociatedValue)
3880           if (LI->getAlignment() < getAssumedAlign()) {
3881             LI->setAlignment(Align(getAssumedAlign()));
3882             STATS_DECLTRACK(AAAlign, Load,
3883                             "Number of times alignment added to a load");
3884             LoadStoreChanged = ChangeStatus::CHANGED;
3885           }
3886       }
3887     }
3888 
3889     ChangeStatus Changed = AAAlign::manifest(A);
3890 
3891     Align InheritAlign =
3892         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3893     if (InheritAlign >= getAssumedAlign())
3894       return LoadStoreChanged;
3895     return Changed | LoadStoreChanged;
3896   }
3897 
3898   // TODO: Provide a helper to determine the implied ABI alignment and check in
3899   //       the existing manifest method and a new one for AAAlignImpl that value
3900   //       to avoid making the alignment explicit if it did not improve.
3901 
3902   /// See AbstractAttribute::getDeducedAttributes
3903   virtual void
3904   getDeducedAttributes(LLVMContext &Ctx,
3905                        SmallVectorImpl<Attribute> &Attrs) const override {
3906     if (getAssumedAlign() > 1)
3907       Attrs.emplace_back(
3908           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3909   }
3910 
3911   /// See followUsesInMBEC
3912   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3913                        AAAlign::StateType &State) {
3914     bool TrackUse = false;
3915 
3916     unsigned int KnownAlign =
3917         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3918     State.takeKnownMaximum(KnownAlign);
3919 
3920     return TrackUse;
3921   }
3922 
3923   /// See AbstractAttribute::getAsStr().
3924   const std::string getAsStr() const override {
3925     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3926                                 "-" + std::to_string(getAssumedAlign()) + ">")
3927                              : "unknown-align";
3928   }
3929 };
3930 
3931 /// Align attribute for a floating value.
3932 struct AAAlignFloating : AAAlignImpl {
3933   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3934 
3935   /// See AbstractAttribute::updateImpl(...).
3936   ChangeStatus updateImpl(Attributor &A) override {
3937     const DataLayout &DL = A.getDataLayout();
3938 
3939     auto VisitValueCB = [&](Value &V, const Instruction *,
3940                             AAAlign::StateType &T, bool Stripped) -> bool {
3941       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3942                                            DepClassTy::REQUIRED);
3943       if (!Stripped && this == &AA) {
3944         int64_t Offset;
3945         unsigned Alignment = 1;
3946         if (const Value *Base =
3947                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3948           Align PA = Base->getPointerAlignment(DL);
3949           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3950           // So we can say that the maximum power of two which is a divisor of
3951           // gcd(Offset, Alignment) is an alignment.
3952 
3953           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3954                                                uint32_t(PA.value()));
3955           Alignment = llvm::PowerOf2Floor(gcd);
3956         } else {
3957           Alignment = V.getPointerAlignment(DL).value();
3958         }
3959         // Use only IR information if we did not strip anything.
3960         T.takeKnownMaximum(Alignment);
3961         T.indicatePessimisticFixpoint();
3962       } else {
3963         // Use abstract attribute information.
3964         const AAAlign::StateType &DS = AA.getState();
3965         T ^= DS;
3966       }
3967       return T.isValidState();
3968     };
3969 
3970     StateType T;
3971     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3972                                                    VisitValueCB, getCtxI()))
3973       return indicatePessimisticFixpoint();
3974 
3975     // TODO: If we know we visited all incoming values, thus no are assumed
3976     // dead, we can take the known information from the state T.
3977     return clampStateAndIndicateChange(getState(), T);
3978   }
3979 
3980   /// See AbstractAttribute::trackStatistics()
3981   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3982 };
3983 
3984 /// Align attribute for function return value.
3985 struct AAAlignReturned final
3986     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3987   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3988   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3989 
3990   /// See AbstractAttribute::initialize(...).
3991   void initialize(Attributor &A) override {
3992     Base::initialize(A);
3993     Function *F = getAssociatedFunction();
3994     if (!F || F->isDeclaration())
3995       indicatePessimisticFixpoint();
3996   }
3997 
3998   /// See AbstractAttribute::trackStatistics()
3999   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
4000 };
4001 
4002 /// Align attribute for function argument.
4003 struct AAAlignArgument final
4004     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
4005   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
4006   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
4007 
4008   /// See AbstractAttribute::manifest(...).
4009   ChangeStatus manifest(Attributor &A) override {
4010     // If the associated argument is involved in a must-tail call we give up
4011     // because we would need to keep the argument alignments of caller and
4012     // callee in-sync. Just does not seem worth the trouble right now.
4013     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
4014       return ChangeStatus::UNCHANGED;
4015     return Base::manifest(A);
4016   }
4017 
4018   /// See AbstractAttribute::trackStatistics()
4019   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
4020 };
4021 
4022 struct AAAlignCallSiteArgument final : AAAlignFloating {
4023   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
4024       : AAAlignFloating(IRP, A) {}
4025 
4026   /// See AbstractAttribute::manifest(...).
4027   ChangeStatus manifest(Attributor &A) override {
4028     // If the associated argument is involved in a must-tail call we give up
4029     // because we would need to keep the argument alignments of caller and
4030     // callee in-sync. Just does not seem worth the trouble right now.
4031     if (Argument *Arg = getAssociatedArgument())
4032       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
4033         return ChangeStatus::UNCHANGED;
4034     ChangeStatus Changed = AAAlignImpl::manifest(A);
4035     Align InheritAlign =
4036         getAssociatedValue().getPointerAlignment(A.getDataLayout());
4037     if (InheritAlign >= getAssumedAlign())
4038       Changed = ChangeStatus::UNCHANGED;
4039     return Changed;
4040   }
4041 
4042   /// See AbstractAttribute::updateImpl(Attributor &A).
4043   ChangeStatus updateImpl(Attributor &A) override {
4044     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
4045     if (Argument *Arg = getAssociatedArgument()) {
4046       // We only take known information from the argument
4047       // so we do not need to track a dependence.
4048       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
4049           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
4050       takeKnownMaximum(ArgAlignAA.getKnownAlign());
4051     }
4052     return Changed;
4053   }
4054 
4055   /// See AbstractAttribute::trackStatistics()
4056   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4057 };
4058 
4059 /// Align attribute deduction for a call site return value.
4060 struct AAAlignCallSiteReturned final
4061     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4062   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4063   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4064       : Base(IRP, A) {}
4065 
4066   /// See AbstractAttribute::initialize(...).
4067   void initialize(Attributor &A) override {
4068     Base::initialize(A);
4069     Function *F = getAssociatedFunction();
4070     if (!F || F->isDeclaration())
4071       indicatePessimisticFixpoint();
4072   }
4073 
4074   /// See AbstractAttribute::trackStatistics()
4075   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4076 };
4077 
4078 /// ------------------ Function No-Return Attribute ----------------------------
4079 struct AANoReturnImpl : public AANoReturn {
4080   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4081 
4082   /// See AbstractAttribute::initialize(...).
4083   void initialize(Attributor &A) override {
4084     AANoReturn::initialize(A);
4085     Function *F = getAssociatedFunction();
4086     if (!F || F->isDeclaration())
4087       indicatePessimisticFixpoint();
4088   }
4089 
4090   /// See AbstractAttribute::getAsStr().
4091   const std::string getAsStr() const override {
4092     return getAssumed() ? "noreturn" : "may-return";
4093   }
4094 
4095   /// See AbstractAttribute::updateImpl(Attributor &A).
4096   virtual ChangeStatus updateImpl(Attributor &A) override {
4097     auto CheckForNoReturn = [](Instruction &) { return false; };
4098     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4099                                    {(unsigned)Instruction::Ret}))
4100       return indicatePessimisticFixpoint();
4101     return ChangeStatus::UNCHANGED;
4102   }
4103 };
4104 
4105 struct AANoReturnFunction final : AANoReturnImpl {
4106   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4107       : AANoReturnImpl(IRP, A) {}
4108 
4109   /// See AbstractAttribute::trackStatistics()
4110   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4111 };
4112 
4113 /// NoReturn attribute deduction for a call sites.
4114 struct AANoReturnCallSite final : AANoReturnImpl {
4115   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4116       : AANoReturnImpl(IRP, A) {}
4117 
4118   /// See AbstractAttribute::initialize(...).
4119   void initialize(Attributor &A) override {
4120     AANoReturnImpl::initialize(A);
4121     if (Function *F = getAssociatedFunction()) {
4122       const IRPosition &FnPos = IRPosition::function(*F);
4123       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4124       if (!FnAA.isAssumedNoReturn())
4125         indicatePessimisticFixpoint();
4126     }
4127   }
4128 
4129   /// See AbstractAttribute::updateImpl(...).
4130   ChangeStatus updateImpl(Attributor &A) override {
4131     // TODO: Once we have call site specific value information we can provide
4132     //       call site specific liveness information and then it makes
4133     //       sense to specialize attributes for call sites arguments instead of
4134     //       redirecting requests to the callee argument.
4135     Function *F = getAssociatedFunction();
4136     const IRPosition &FnPos = IRPosition::function(*F);
4137     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4138     return clampStateAndIndicateChange(getState(), FnAA.getState());
4139   }
4140 
4141   /// See AbstractAttribute::trackStatistics()
4142   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4143 };
4144 
4145 /// ----------------------- Variable Capturing ---------------------------------
4146 
4147 /// A class to hold the state of for no-capture attributes.
4148 struct AANoCaptureImpl : public AANoCapture {
4149   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4150 
4151   /// See AbstractAttribute::initialize(...).
4152   void initialize(Attributor &A) override {
4153     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4154       indicateOptimisticFixpoint();
4155       return;
4156     }
4157     Function *AnchorScope = getAnchorScope();
4158     if (isFnInterfaceKind() &&
4159         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4160       indicatePessimisticFixpoint();
4161       return;
4162     }
4163 
4164     // You cannot "capture" null in the default address space.
4165     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4166         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4167       indicateOptimisticFixpoint();
4168       return;
4169     }
4170 
4171     const Function *F =
4172         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4173 
4174     // Check what state the associated function can actually capture.
4175     if (F)
4176       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4177     else
4178       indicatePessimisticFixpoint();
4179   }
4180 
4181   /// See AbstractAttribute::updateImpl(...).
4182   ChangeStatus updateImpl(Attributor &A) override;
4183 
4184   /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
4185   virtual void
4186   getDeducedAttributes(LLVMContext &Ctx,
4187                        SmallVectorImpl<Attribute> &Attrs) const override {
4188     if (!isAssumedNoCaptureMaybeReturned())
4189       return;
4190 
4191     if (isArgumentPosition()) {
4192       if (isAssumedNoCapture())
4193         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4194       else if (ManifestInternal)
4195         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4196     }
4197   }
4198 
4199   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4200   /// depending on the ability of the function associated with \p IRP to capture
4201   /// state in memory and through "returning/throwing", respectively.
4202   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4203                                                    const Function &F,
4204                                                    BitIntegerState &State) {
4205     // TODO: Once we have memory behavior attributes we should use them here.
4206 
4207     // If we know we cannot communicate or write to memory, we do not care about
4208     // ptr2int anymore.
4209     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4210         F.getReturnType()->isVoidTy()) {
4211       State.addKnownBits(NO_CAPTURE);
4212       return;
4213     }
4214 
4215     // A function cannot capture state in memory if it only reads memory, it can
4216     // however return/throw state and the state might be influenced by the
4217     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4218     if (F.onlyReadsMemory())
4219       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4220 
4221     // A function cannot communicate state back if it does not through
4222     // exceptions and doesn not return values.
4223     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4224       State.addKnownBits(NOT_CAPTURED_IN_RET);
4225 
4226     // Check existing "returned" attributes.
4227     int ArgNo = IRP.getCalleeArgNo();
4228     if (F.doesNotThrow() && ArgNo >= 0) {
4229       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4230         if (F.hasParamAttribute(u, Attribute::Returned)) {
4231           if (u == unsigned(ArgNo))
4232             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4233           else if (F.onlyReadsMemory())
4234             State.addKnownBits(NO_CAPTURE);
4235           else
4236             State.addKnownBits(NOT_CAPTURED_IN_RET);
4237           break;
4238         }
4239     }
4240   }
4241 
4242   /// See AbstractState::getAsStr().
4243   const std::string getAsStr() const override {
4244     if (isKnownNoCapture())
4245       return "known not-captured";
4246     if (isAssumedNoCapture())
4247       return "assumed not-captured";
4248     if (isKnownNoCaptureMaybeReturned())
4249       return "known not-captured-maybe-returned";
4250     if (isAssumedNoCaptureMaybeReturned())
4251       return "assumed not-captured-maybe-returned";
4252     return "assumed-captured";
4253   }
4254 };
4255 
4256 /// Attributor-aware capture tracker.
4257 struct AACaptureUseTracker final : public CaptureTracker {
4258 
4259   /// Create a capture tracker that can lookup in-flight abstract attributes
4260   /// through the Attributor \p A.
4261   ///
4262   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4263   /// search is stopped. If a use leads to a return instruction,
4264   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4265   /// If a use leads to a ptr2int which may capture the value,
4266   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4267   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4268   /// set. All values in \p PotentialCopies are later tracked as well. For every
4269   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4270   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4271   /// conservatively set to true.
4272   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4273                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4274                       SmallVectorImpl<const Value *> &PotentialCopies,
4275                       unsigned &RemainingUsesToExplore)
4276       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4277         PotentialCopies(PotentialCopies),
4278         RemainingUsesToExplore(RemainingUsesToExplore) {}
4279 
4280   /// Determine if \p V maybe captured. *Also updates the state!*
4281   bool valueMayBeCaptured(const Value *V) {
4282     if (V->getType()->isPointerTy()) {
4283       PointerMayBeCaptured(V, this);
4284     } else {
4285       State.indicatePessimisticFixpoint();
4286     }
4287     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4288   }
4289 
4290   /// See CaptureTracker::tooManyUses().
4291   void tooManyUses() override {
4292     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4293   }
4294 
4295   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4296     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4297       return true;
4298     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4299         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4300     return DerefAA.getAssumedDereferenceableBytes();
4301   }
4302 
4303   /// See CaptureTracker::captured(...).
4304   bool captured(const Use *U) override {
4305     Instruction *UInst = cast<Instruction>(U->getUser());
4306     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4307                       << "\n");
4308 
4309     // Because we may reuse the tracker multiple times we keep track of the
4310     // number of explored uses ourselves as well.
4311     if (RemainingUsesToExplore-- == 0) {
4312       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4313       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4314                           /* Return */ true);
4315     }
4316 
4317     // Deal with ptr2int by following uses.
4318     if (isa<PtrToIntInst>(UInst)) {
4319       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4320       return valueMayBeCaptured(UInst);
4321     }
4322 
4323     // Explicitly catch return instructions.
4324     if (isa<ReturnInst>(UInst))
4325       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4326                           /* Return */ true);
4327 
4328     // For now we only use special logic for call sites. However, the tracker
4329     // itself knows about a lot of other non-capturing cases already.
4330     auto *CB = dyn_cast<CallBase>(UInst);
4331     if (!CB || !CB->isArgOperand(U))
4332       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4333                           /* Return */ true);
4334 
4335     unsigned ArgNo = CB->getArgOperandNo(U);
4336     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4337     // If we have a abstract no-capture attribute for the argument we can use
4338     // it to justify a non-capture attribute here. This allows recursion!
4339     auto &ArgNoCaptureAA =
4340         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4341     if (ArgNoCaptureAA.isAssumedNoCapture())
4342       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4343                           /* Return */ false);
4344     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4345       addPotentialCopy(*CB);
4346       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4347                           /* Return */ false);
4348     }
4349 
4350     // Lastly, we could not find a reason no-capture can be assumed so we don't.
4351     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4352                         /* Return */ true);
4353   }
4354 
4355   /// Register \p CS as potential copy of the value we are checking.
4356   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4357 
4358   /// See CaptureTracker::shouldExplore(...).
4359   bool shouldExplore(const Use *U) override {
4360     // Check liveness and ignore droppable users.
4361     return !U->getUser()->isDroppable() &&
4362            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4363   }
4364 
4365   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4366   /// \p CapturedInRet, then return the appropriate value for use in the
4367   /// CaptureTracker::captured() interface.
4368   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4369                     bool CapturedInRet) {
4370     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4371                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4372     if (CapturedInMem)
4373       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4374     if (CapturedInInt)
4375       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4376     if (CapturedInRet)
4377       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4378     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4379   }
4380 
4381 private:
4382   /// The attributor providing in-flight abstract attributes.
4383   Attributor &A;
4384 
4385   /// The abstract attribute currently updated.
4386   AANoCapture &NoCaptureAA;
4387 
4388   /// The abstract liveness state.
4389   const AAIsDead &IsDeadAA;
4390 
4391   /// The state currently updated.
4392   AANoCapture::StateType &State;
4393 
4394   /// Set of potential copies of the tracked value.
4395   SmallVectorImpl<const Value *> &PotentialCopies;
4396 
4397   /// Global counter to limit the number of explored uses.
4398   unsigned &RemainingUsesToExplore;
4399 };
4400 
4401 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4402   const IRPosition &IRP = getIRPosition();
4403   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4404                                         : &IRP.getAssociatedValue();
4405   if (!V)
4406     return indicatePessimisticFixpoint();
4407 
4408   const Function *F =
4409       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4410   assert(F && "Expected a function!");
4411   const IRPosition &FnPos = IRPosition::function(*F);
4412   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4413 
4414   AANoCapture::StateType T;
4415 
4416   // Readonly means we cannot capture through memory.
4417   const auto &FnMemAA =
4418       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4419   if (FnMemAA.isAssumedReadOnly()) {
4420     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4421     if (FnMemAA.isKnownReadOnly())
4422       addKnownBits(NOT_CAPTURED_IN_MEM);
4423     else
4424       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4425   }
4426 
4427   // Make sure all returned values are different than the underlying value.
4428   // TODO: we could do this in a more sophisticated way inside
4429   //       AAReturnedValues, e.g., track all values that escape through returns
4430   //       directly somehow.
4431   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4432     bool SeenConstant = false;
4433     for (auto &It : RVAA.returned_values()) {
4434       if (isa<Constant>(It.first)) {
4435         if (SeenConstant)
4436           return false;
4437         SeenConstant = true;
4438       } else if (!isa<Argument>(It.first) ||
4439                  It.first == getAssociatedArgument())
4440         return false;
4441     }
4442     return true;
4443   };
4444 
4445   const auto &NoUnwindAA =
4446       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4447   if (NoUnwindAA.isAssumedNoUnwind()) {
4448     bool IsVoidTy = F->getReturnType()->isVoidTy();
4449     const AAReturnedValues *RVAA =
4450         IsVoidTy ? nullptr
4451                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4452 
4453                                                  DepClassTy::OPTIONAL);
4454     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4455       T.addKnownBits(NOT_CAPTURED_IN_RET);
4456       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4457         return ChangeStatus::UNCHANGED;
4458       if (NoUnwindAA.isKnownNoUnwind() &&
4459           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4460         addKnownBits(NOT_CAPTURED_IN_RET);
4461         if (isKnown(NOT_CAPTURED_IN_MEM))
4462           return indicateOptimisticFixpoint();
4463       }
4464     }
4465   }
4466 
4467   // Use the CaptureTracker interface and logic with the specialized tracker,
4468   // defined in AACaptureUseTracker, that can look at in-flight abstract
4469   // attributes and directly updates the assumed state.
4470   SmallVector<const Value *, 4> PotentialCopies;
4471   unsigned RemainingUsesToExplore =
4472       getDefaultMaxUsesToExploreForCaptureTracking();
4473   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4474                               RemainingUsesToExplore);
4475 
4476   // Check all potential copies of the associated value until we can assume
4477   // none will be captured or we have to assume at least one might be.
4478   unsigned Idx = 0;
4479   PotentialCopies.push_back(V);
4480   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4481     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4482 
4483   AANoCapture::StateType &S = getState();
4484   auto Assumed = S.getAssumed();
4485   S.intersectAssumedBits(T.getAssumed());
4486   if (!isAssumedNoCaptureMaybeReturned())
4487     return indicatePessimisticFixpoint();
4488   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4489                                    : ChangeStatus::CHANGED;
4490 }
4491 
4492 /// NoCapture attribute for function arguments.
4493 struct AANoCaptureArgument final : AANoCaptureImpl {
4494   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4495       : AANoCaptureImpl(IRP, A) {}
4496 
4497   /// See AbstractAttribute::trackStatistics()
4498   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4499 };
4500 
4501 /// NoCapture attribute for call site arguments.
4502 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4503   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4504       : AANoCaptureImpl(IRP, A) {}
4505 
4506   /// See AbstractAttribute::initialize(...).
4507   void initialize(Attributor &A) override {
4508     if (Argument *Arg = getAssociatedArgument())
4509       if (Arg->hasByValAttr())
4510         indicateOptimisticFixpoint();
4511     AANoCaptureImpl::initialize(A);
4512   }
4513 
4514   /// See AbstractAttribute::updateImpl(...).
4515   ChangeStatus updateImpl(Attributor &A) override {
4516     // TODO: Once we have call site specific value information we can provide
4517     //       call site specific liveness information and then it makes
4518     //       sense to specialize attributes for call sites arguments instead of
4519     //       redirecting requests to the callee argument.
4520     Argument *Arg = getAssociatedArgument();
4521     if (!Arg)
4522       return indicatePessimisticFixpoint();
4523     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4524     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4525     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4526   }
4527 
4528   /// See AbstractAttribute::trackStatistics()
4529   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4530 };
4531 
4532 /// NoCapture attribute for floating values.
4533 struct AANoCaptureFloating final : AANoCaptureImpl {
4534   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4535       : AANoCaptureImpl(IRP, A) {}
4536 
4537   /// See AbstractAttribute::trackStatistics()
4538   void trackStatistics() const override {
4539     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4540   }
4541 };
4542 
4543 /// NoCapture attribute for function return value.
4544 struct AANoCaptureReturned final : AANoCaptureImpl {
4545   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4546       : AANoCaptureImpl(IRP, A) {
4547     llvm_unreachable("NoCapture is not applicable to function returns!");
4548   }
4549 
4550   /// See AbstractAttribute::initialize(...).
4551   void initialize(Attributor &A) override {
4552     llvm_unreachable("NoCapture is not applicable to function returns!");
4553   }
4554 
4555   /// See AbstractAttribute::updateImpl(...).
4556   ChangeStatus updateImpl(Attributor &A) override {
4557     llvm_unreachable("NoCapture is not applicable to function returns!");
4558   }
4559 
4560   /// See AbstractAttribute::trackStatistics()
4561   void trackStatistics() const override {}
4562 };
4563 
4564 /// NoCapture attribute deduction for a call site return value.
4565 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4566   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4567       : AANoCaptureImpl(IRP, A) {}
4568 
4569   /// See AbstractAttribute::initialize(...).
4570   void initialize(Attributor &A) override {
4571     const Function *F = getAnchorScope();
4572     // Check what state the associated function can actually capture.
4573     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4574   }
4575 
4576   /// See AbstractAttribute::trackStatistics()
4577   void trackStatistics() const override {
4578     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4579   }
4580 };
4581 
4582 /// ------------------ Value Simplify Attribute ----------------------------
4583 struct AAValueSimplifyImpl : AAValueSimplify {
4584   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4585       : AAValueSimplify(IRP, A) {}
4586 
4587   /// See AbstractAttribute::initialize(...).
4588   void initialize(Attributor &A) override {
4589     if (getAssociatedValue().getType()->isVoidTy())
4590       indicatePessimisticFixpoint();
4591   }
4592 
4593   /// See AbstractAttribute::getAsStr().
4594   const std::string getAsStr() const override {
4595     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4596                         : "not-simple";
4597   }
4598 
4599   /// See AbstractAttribute::trackStatistics()
4600   void trackStatistics() const override {}
4601 
4602   /// See AAValueSimplify::getAssumedSimplifiedValue()
4603   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4604     if (!getAssumed())
4605       return const_cast<Value *>(&getAssociatedValue());
4606     return SimplifiedAssociatedValue;
4607   }
4608 
4609   /// Helper function for querying AAValueSimplify and updating candicate.
4610   /// \param QueryingValue Value trying to unify with SimplifiedValue
4611   /// \param AccumulatedSimplifiedValue Current simplification result.
4612   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4613                              Value &QueryingValue,
4614                              Optional<Value *> &AccumulatedSimplifiedValue) {
4615     // FIXME: Add a typecast support.
4616 
4617     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4618         QueryingAA, IRPosition::value(QueryingValue), DepClassTy::REQUIRED);
4619 
4620     Optional<Value *> QueryingValueSimplified =
4621         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4622 
4623     if (!QueryingValueSimplified.hasValue())
4624       return true;
4625 
4626     if (!QueryingValueSimplified.getValue())
4627       return false;
4628 
4629     Value &QueryingValueSimplifiedUnwrapped =
4630         *QueryingValueSimplified.getValue();
4631 
4632     if (AccumulatedSimplifiedValue.hasValue() &&
4633         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4634         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4635       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4636     if (AccumulatedSimplifiedValue.hasValue() &&
4637         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4638       return true;
4639 
4640     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4641                       << " is assumed to be "
4642                       << QueryingValueSimplifiedUnwrapped << "\n");
4643 
4644     AccumulatedSimplifiedValue = QueryingValueSimplified;
4645     return true;
4646   }
4647 
4648   /// Returns a candidate is found or not
4649   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4650     if (!getAssociatedValue().getType()->isIntegerTy())
4651       return false;
4652 
4653     const auto &AA =
4654         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4655 
4656     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4657 
4658     if (!COpt.hasValue()) {
4659       SimplifiedAssociatedValue = llvm::None;
4660       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4661       return true;
4662     }
4663     if (auto *C = COpt.getValue()) {
4664       SimplifiedAssociatedValue = C;
4665       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4666       return true;
4667     }
4668     return false;
4669   }
4670 
4671   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4672     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4673       return true;
4674     if (askSimplifiedValueFor<AAPotentialValues>(A))
4675       return true;
4676     return false;
4677   }
4678 
4679   /// See AbstractAttribute::manifest(...).
4680   ChangeStatus manifest(Attributor &A) override {
4681     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4682 
4683     if (SimplifiedAssociatedValue.hasValue() &&
4684         !SimplifiedAssociatedValue.getValue())
4685       return Changed;
4686 
4687     Value &V = getAssociatedValue();
4688     auto *C = SimplifiedAssociatedValue.hasValue()
4689                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4690                   : UndefValue::get(V.getType());
4691     if (C) {
4692       // We can replace the AssociatedValue with the constant.
4693       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4694         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4695                           << " :: " << *this << "\n");
4696         if (A.changeValueAfterManifest(V, *C))
4697           Changed = ChangeStatus::CHANGED;
4698       }
4699     }
4700 
4701     return Changed | AAValueSimplify::manifest(A);
4702   }
4703 
4704   /// See AbstractState::indicatePessimisticFixpoint(...).
4705   ChangeStatus indicatePessimisticFixpoint() override {
4706     // NOTE: Associated value will be returned in a pessimistic fixpoint and is
4707     // regarded as known. That's why`indicateOptimisticFixpoint` is called.
4708     SimplifiedAssociatedValue = &getAssociatedValue();
4709     indicateOptimisticFixpoint();
4710     return ChangeStatus::CHANGED;
4711   }
4712 
4713 protected:
4714   // An assumed simplified value. Initially, it is set to Optional::None, which
4715   // means that the value is not clear under current assumption. If in the
4716   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4717   // returns orignal associated value.
4718   Optional<Value *> SimplifiedAssociatedValue;
4719 };
4720 
4721 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4722   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4723       : AAValueSimplifyImpl(IRP, A) {}
4724 
4725   void initialize(Attributor &A) override {
4726     AAValueSimplifyImpl::initialize(A);
4727     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4728       indicatePessimisticFixpoint();
4729     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4730                  Attribute::StructRet, Attribute::Nest},
4731                 /* IgnoreSubsumingPositions */ true))
4732       indicatePessimisticFixpoint();
4733 
4734     // FIXME: This is a hack to prevent us from propagating function poiner in
4735     // the new pass manager CGSCC pass as it creates call edges the
4736     // CallGraphUpdater cannot handle yet.
4737     Value &V = getAssociatedValue();
4738     if (V.getType()->isPointerTy() &&
4739         V.getType()->getPointerElementType()->isFunctionTy() &&
4740         !A.isModulePass())
4741       indicatePessimisticFixpoint();
4742   }
4743 
4744   /// See AbstractAttribute::updateImpl(...).
4745   ChangeStatus updateImpl(Attributor &A) override {
4746     // Byval is only replacable if it is readonly otherwise we would write into
4747     // the replaced value and not the copy that byval creates implicitly.
4748     Argument *Arg = getAssociatedArgument();
4749     if (Arg->hasByValAttr()) {
4750       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4751       //       there is no race by not copying a constant byval.
4752       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4753                                                        DepClassTy::REQUIRED);
4754       if (!MemAA.isAssumedReadOnly())
4755         return indicatePessimisticFixpoint();
4756     }
4757 
4758     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4759 
4760     auto PredForCallSite = [&](AbstractCallSite ACS) {
4761       const IRPosition &ACSArgPos =
4762           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4763       // Check if a coresponding argument was found or if it is on not
4764       // associated (which can happen for callback calls).
4765       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4766         return false;
4767 
4768       // We can only propagate thread independent values through callbacks.
4769       // This is different to direct/indirect call sites because for them we
4770       // know the thread executing the caller and callee is the same. For
4771       // callbacks this is not guaranteed, thus a thread dependent value could
4772       // be different for the caller and callee, making it invalid to propagate.
4773       Value &ArgOp = ACSArgPos.getAssociatedValue();
4774       if (ACS.isCallbackCall())
4775         if (auto *C = dyn_cast<Constant>(&ArgOp))
4776           if (C->isThreadDependent())
4777             return false;
4778       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4779     };
4780 
4781     bool AllCallSitesKnown;
4782     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4783                                 AllCallSitesKnown))
4784       if (!askSimplifiedValueForOtherAAs(A))
4785         return indicatePessimisticFixpoint();
4786 
4787     // If a candicate was found in this update, return CHANGED.
4788     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4789                ? ChangeStatus::UNCHANGED
4790                : ChangeStatus ::CHANGED;
4791   }
4792 
4793   /// See AbstractAttribute::trackStatistics()
4794   void trackStatistics() const override {
4795     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4796   }
4797 };
4798 
4799 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4800   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4801       : AAValueSimplifyImpl(IRP, A) {}
4802 
4803   /// See AbstractAttribute::updateImpl(...).
4804   ChangeStatus updateImpl(Attributor &A) override {
4805     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4806 
4807     auto PredForReturned = [&](Value &V) {
4808       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4809     };
4810 
4811     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4812       if (!askSimplifiedValueForOtherAAs(A))
4813         return indicatePessimisticFixpoint();
4814 
4815     // If a candicate was found in this update, return CHANGED.
4816     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4817                ? ChangeStatus::UNCHANGED
4818                : ChangeStatus ::CHANGED;
4819   }
4820 
4821   ChangeStatus manifest(Attributor &A) override {
4822     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4823 
4824     if (SimplifiedAssociatedValue.hasValue() &&
4825         !SimplifiedAssociatedValue.getValue())
4826       return Changed;
4827 
4828     Value &V = getAssociatedValue();
4829     auto *C = SimplifiedAssociatedValue.hasValue()
4830                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4831                   : UndefValue::get(V.getType());
4832     if (C) {
4833       auto PredForReturned =
4834           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4835             // We can replace the AssociatedValue with the constant.
4836             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4837               return true;
4838 
4839             for (ReturnInst *RI : RetInsts) {
4840               if (RI->getFunction() != getAnchorScope())
4841                 continue;
4842               auto *RC = C;
4843               if (RC->getType() != RI->getReturnValue()->getType())
4844                 RC = ConstantExpr::getBitCast(RC,
4845                                               RI->getReturnValue()->getType());
4846               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4847                                 << " in " << *RI << " :: " << *this << "\n");
4848               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4849                 Changed = ChangeStatus::CHANGED;
4850             }
4851             return true;
4852           };
4853       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4854     }
4855 
4856     return Changed | AAValueSimplify::manifest(A);
4857   }
4858 
4859   /// See AbstractAttribute::trackStatistics()
4860   void trackStatistics() const override {
4861     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4862   }
4863 };
4864 
4865 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4866   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4867       : AAValueSimplifyImpl(IRP, A) {}
4868 
4869   /// See AbstractAttribute::initialize(...).
4870   void initialize(Attributor &A) override {
4871     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4872     //        Needs investigation.
4873     // AAValueSimplifyImpl::initialize(A);
4874     Value &V = getAnchorValue();
4875 
4876     // TODO: add other stuffs
4877     if (isa<Constant>(V))
4878       indicatePessimisticFixpoint();
4879   }
4880 
4881   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4882   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4883   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4884   /// updated and \p Changed is set appropriately.
4885   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4886                               ChangeStatus &Changed) {
4887     if (!ICmp)
4888       return false;
4889     if (!ICmp->isEquality())
4890       return false;
4891 
4892     // This is a comparison with == or !-. We check for nullptr now.
4893     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4894     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4895     if (!Op0IsNull && !Op1IsNull)
4896       return false;
4897 
4898     LLVMContext &Ctx = ICmp->getContext();
4899     // Check for `nullptr ==/!= nullptr` first:
4900     if (Op0IsNull && Op1IsNull) {
4901       Value *NewVal = ConstantInt::get(
4902           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4903       assert(!SimplifiedAssociatedValue.hasValue() &&
4904              "Did not expect non-fixed value for constant comparison");
4905       SimplifiedAssociatedValue = NewVal;
4906       indicateOptimisticFixpoint();
4907       Changed = ChangeStatus::CHANGED;
4908       return true;
4909     }
4910 
4911     // Left is the nullptr ==/!= non-nullptr case. We'll use AANonNull on the
4912     // non-nullptr operand and if we assume it's non-null we can conclude the
4913     // result of the comparison.
4914     assert((Op0IsNull || Op1IsNull) &&
4915            "Expected nullptr versus non-nullptr comparison at this point");
4916 
4917     // The index is the operand that we assume is not null.
4918     unsigned PtrIdx = Op0IsNull;
4919     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4920         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4921         DepClassTy::REQUIRED);
4922     if (!PtrNonNullAA.isAssumedNonNull())
4923       return false;
4924 
4925     // The new value depends on the predicate, true for != and false for ==.
4926     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4927                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4928 
4929     assert((!SimplifiedAssociatedValue.hasValue() ||
4930             SimplifiedAssociatedValue == NewVal) &&
4931            "Did not expect to change value for zero-comparison");
4932 
4933     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4934     SimplifiedAssociatedValue = NewVal;
4935 
4936     if (PtrNonNullAA.isKnownNonNull())
4937       indicateOptimisticFixpoint();
4938 
4939     Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus ::CHANGED;
4940     return true;
4941   }
4942 
4943   /// See AbstractAttribute::updateImpl(...).
4944   ChangeStatus updateImpl(Attributor &A) override {
4945     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4946 
4947     ChangeStatus Changed;
4948     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4949                                Changed))
4950       return Changed;
4951 
4952     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4953                             bool Stripped) -> bool {
4954       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V),
4955                                              DepClassTy::REQUIRED);
4956       if (!Stripped && this == &AA) {
4957         // TODO: Look the instruction and check recursively.
4958 
4959         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4960                           << "\n");
4961         return false;
4962       }
4963       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4964     };
4965 
4966     bool Dummy = false;
4967     if (!genericValueTraversal<AAValueSimplify, bool>(
4968             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4969             /* UseValueSimplify */ false))
4970       if (!askSimplifiedValueForOtherAAs(A))
4971         return indicatePessimisticFixpoint();
4972 
4973     // If a candicate was found in this update, return CHANGED.
4974 
4975     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4976                ? ChangeStatus::UNCHANGED
4977                : ChangeStatus ::CHANGED;
4978   }
4979 
4980   /// See AbstractAttribute::trackStatistics()
4981   void trackStatistics() const override {
4982     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4983   }
4984 };
4985 
4986 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4987   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4988       : AAValueSimplifyImpl(IRP, A) {}
4989 
4990   /// See AbstractAttribute::initialize(...).
4991   void initialize(Attributor &A) override {
4992     SimplifiedAssociatedValue = &getAnchorValue();
4993     indicateOptimisticFixpoint();
4994   }
4995   /// See AbstractAttribute::initialize(...).
4996   ChangeStatus updateImpl(Attributor &A) override {
4997     llvm_unreachable(
4998         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4999   }
5000   /// See AbstractAttribute::trackStatistics()
5001   void trackStatistics() const override {
5002     STATS_DECLTRACK_FN_ATTR(value_simplify)
5003   }
5004 };
5005 
5006 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
5007   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
5008       : AAValueSimplifyFunction(IRP, A) {}
5009   /// See AbstractAttribute::trackStatistics()
5010   void trackStatistics() const override {
5011     STATS_DECLTRACK_CS_ATTR(value_simplify)
5012   }
5013 };
5014 
5015 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
5016   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
5017       : AAValueSimplifyReturned(IRP, A) {}
5018 
5019   /// See AbstractAttribute::manifest(...).
5020   ChangeStatus manifest(Attributor &A) override {
5021     return AAValueSimplifyImpl::manifest(A);
5022   }
5023 
5024   void trackStatistics() const override {
5025     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
5026   }
5027 };
5028 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
5029   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
5030       : AAValueSimplifyFloating(IRP, A) {}
5031 
5032   /// See AbstractAttribute::manifest(...).
5033   ChangeStatus manifest(Attributor &A) override {
5034     ChangeStatus Changed = ChangeStatus::UNCHANGED;
5035 
5036     if (SimplifiedAssociatedValue.hasValue() &&
5037         !SimplifiedAssociatedValue.getValue())
5038       return Changed;
5039 
5040     Value &V = getAssociatedValue();
5041     auto *C = SimplifiedAssociatedValue.hasValue()
5042                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
5043                   : UndefValue::get(V.getType());
5044     if (C) {
5045       Use &U = cast<CallBase>(&getAnchorValue())
5046                    ->getArgOperandUse(getCallSiteArgNo());
5047       // We can replace the AssociatedValue with the constant.
5048       if (&V != C && V.getType() == C->getType()) {
5049         if (A.changeUseAfterManifest(U, *C))
5050           Changed = ChangeStatus::CHANGED;
5051       }
5052     }
5053 
5054     return Changed | AAValueSimplify::manifest(A);
5055   }
5056 
5057   void trackStatistics() const override {
5058     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5059   }
5060 };
5061 
5062 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5063 struct AAHeapToStackImpl : public AAHeapToStack {
5064   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
5065       : AAHeapToStack(IRP, A) {}
5066 
5067   const std::string getAsStr() const override {
5068     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
5069   }
5070 
5071   ChangeStatus manifest(Attributor &A) override {
5072     assert(getState().isValidState() &&
5073            "Attempted to manifest an invalid state!");
5074 
5075     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5076     Function *F = getAnchorScope();
5077     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5078 
5079     for (Instruction *MallocCall : MallocCalls) {
5080       // This malloc cannot be replaced.
5081       if (BadMallocCalls.count(MallocCall))
5082         continue;
5083 
5084       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5085         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5086         A.deleteAfterManifest(*FreeCall);
5087         HasChanged = ChangeStatus::CHANGED;
5088       }
5089 
5090       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5091                         << "\n");
5092 
5093       Align Alignment;
5094       Value *Size;
5095       if (isCallocLikeFn(MallocCall, TLI)) {
5096         auto *Num = MallocCall->getOperand(0);
5097         auto *SizeT = MallocCall->getOperand(1);
5098         IRBuilder<> B(MallocCall);
5099         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5100       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5101         Size = MallocCall->getOperand(1);
5102         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5103                                    ->getValue()
5104                                    .getZExtValue())
5105                         .valueOrOne();
5106       } else {
5107         Size = MallocCall->getOperand(0);
5108       }
5109 
5110       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5111       Instruction *AI =
5112           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5113                          "", MallocCall->getNextNode());
5114 
5115       if (AI->getType() != MallocCall->getType())
5116         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5117                              AI->getNextNode());
5118 
5119       A.changeValueAfterManifest(*MallocCall, *AI);
5120 
5121       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5122         auto *NBB = II->getNormalDest();
5123         BranchInst::Create(NBB, MallocCall->getParent());
5124         A.deleteAfterManifest(*MallocCall);
5125       } else {
5126         A.deleteAfterManifest(*MallocCall);
5127       }
5128 
5129       // Zero out the allocated memory if it was a calloc.
5130       if (isCallocLikeFn(MallocCall, TLI)) {
5131         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5132                                    AI->getNextNode());
5133         Value *Ops[] = {
5134             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5135             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5136 
5137         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5138         Module *M = F->getParent();
5139         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5140         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5141       }
5142       HasChanged = ChangeStatus::CHANGED;
5143     }
5144 
5145     return HasChanged;
5146   }
5147 
5148   /// Collection of all malloc calls in a function.
5149   SmallSetVector<Instruction *, 4> MallocCalls;
5150 
5151   /// Collection of malloc calls that cannot be converted.
5152   DenseSet<const Instruction *> BadMallocCalls;
5153 
5154   /// A map for each malloc call to the set of associated free calls.
5155   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5156 
5157   ChangeStatus updateImpl(Attributor &A) override;
5158 };
5159 
5160 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5161   const Function *F = getAnchorScope();
5162   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5163 
5164   MustBeExecutedContextExplorer &Explorer =
5165       A.getInfoCache().getMustBeExecutedContextExplorer();
5166 
5167   auto FreeCheck = [&](Instruction &I) {
5168     const auto &Frees = FreesForMalloc.lookup(&I);
5169     if (Frees.size() != 1)
5170       return false;
5171     Instruction *UniqueFree = *Frees.begin();
5172     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5173   };
5174 
5175   auto UsesCheck = [&](Instruction &I) {
5176     bool ValidUsesOnly = true;
5177     bool MustUse = true;
5178     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5179       Instruction *UserI = cast<Instruction>(U.getUser());
5180       if (isa<LoadInst>(UserI))
5181         return true;
5182       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5183         if (SI->getValueOperand() == U.get()) {
5184           LLVM_DEBUG(dbgs()
5185                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5186           ValidUsesOnly = false;
5187         } else {
5188           // A store into the malloc'ed memory is fine.
5189         }
5190         return true;
5191       }
5192       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5193         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5194           return true;
5195         // Record malloc.
5196         if (isFreeCall(UserI, TLI)) {
5197           if (MustUse) {
5198             FreesForMalloc[&I].insert(UserI);
5199           } else {
5200             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5201                               << *UserI << "\n");
5202             ValidUsesOnly = false;
5203           }
5204           return true;
5205         }
5206 
5207         unsigned ArgNo = CB->getArgOperandNo(&U);
5208 
5209         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5210             *this, IRPosition::callsite_argument(*CB, ArgNo),
5211             DepClassTy::REQUIRED);
5212 
5213         // If a callsite argument use is nofree, we are fine.
5214         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5215             *this, IRPosition::callsite_argument(*CB, ArgNo),
5216             DepClassTy::REQUIRED);
5217 
5218         if (!NoCaptureAA.isAssumedNoCapture() ||
5219             !ArgNoFreeAA.isAssumedNoFree()) {
5220           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5221           ValidUsesOnly = false;
5222         }
5223         return true;
5224       }
5225 
5226       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5227           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
5228         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5229         Follow = true;
5230         return true;
5231       }
5232       // Unknown user for which we can not track uses further (in a way that
5233       // makes sense).
5234       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5235       ValidUsesOnly = false;
5236       return true;
5237     };
5238     A.checkForAllUses(Pred, *this, I);
5239     return ValidUsesOnly;
5240   };
5241 
5242   auto MallocCallocCheck = [&](Instruction &I) {
5243     if (BadMallocCalls.count(&I))
5244       return true;
5245 
5246     bool IsMalloc = isMallocLikeFn(&I, TLI);
5247     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5248     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5249     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5250       BadMallocCalls.insert(&I);
5251       return true;
5252     }
5253 
5254     if (IsMalloc) {
5255       if (MaxHeapToStackSize == -1) {
5256         if (UsesCheck(I) || FreeCheck(I)) {
5257           MallocCalls.insert(&I);
5258           return true;
5259         }
5260       }
5261       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5262         if (Size->getValue().ule(MaxHeapToStackSize))
5263           if (UsesCheck(I) || FreeCheck(I)) {
5264             MallocCalls.insert(&I);
5265             return true;
5266           }
5267     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5268       if (MaxHeapToStackSize == -1) {
5269         if (UsesCheck(I) || FreeCheck(I)) {
5270           MallocCalls.insert(&I);
5271           return true;
5272         }
5273       }
5274       // Only if the alignment and sizes are constant.
5275       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5276         if (Size->getValue().ule(MaxHeapToStackSize))
5277           if (UsesCheck(I) || FreeCheck(I)) {
5278             MallocCalls.insert(&I);
5279             return true;
5280           }
5281     } else if (IsCalloc) {
5282       if (MaxHeapToStackSize == -1) {
5283         if (UsesCheck(I) || FreeCheck(I)) {
5284           MallocCalls.insert(&I);
5285           return true;
5286         }
5287       }
5288       bool Overflow = false;
5289       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5290         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5291           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5292                   .ule(MaxHeapToStackSize))
5293             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5294               MallocCalls.insert(&I);
5295               return true;
5296             }
5297     }
5298 
5299     BadMallocCalls.insert(&I);
5300     return true;
5301   };
5302 
5303   size_t NumBadMallocs = BadMallocCalls.size();
5304 
5305   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5306 
5307   if (NumBadMallocs != BadMallocCalls.size())
5308     return ChangeStatus::CHANGED;
5309 
5310   return ChangeStatus::UNCHANGED;
5311 }
5312 
5313 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5314   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5315       : AAHeapToStackImpl(IRP, A) {}
5316 
5317   /// See AbstractAttribute::trackStatistics().
5318   void trackStatistics() const override {
5319     STATS_DECL(
5320         MallocCalls, Function,
5321         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5322     for (auto *C : MallocCalls)
5323       if (!BadMallocCalls.count(C))
5324         ++BUILD_STAT_NAME(MallocCalls, Function);
5325   }
5326 };
5327 
5328 /// ----------------------- Privatizable Pointers ------------------------------
5329 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5330   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5331       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5332 
5333   ChangeStatus indicatePessimisticFixpoint() override {
5334     AAPrivatizablePtr::indicatePessimisticFixpoint();
5335     PrivatizableType = nullptr;
5336     return ChangeStatus::CHANGED;
5337   }
5338 
5339   /// Identify the type we can chose for a private copy of the underlying
5340   /// argument. None means it is not clear yet, nullptr means there is none.
5341   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5342 
5343   /// Return a privatizable type that encloses both T0 and T1.
5344   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5345   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5346     if (!T0.hasValue())
5347       return T1;
5348     if (!T1.hasValue())
5349       return T0;
5350     if (T0 == T1)
5351       return T0;
5352     return nullptr;
5353   }
5354 
5355   Optional<Type *> getPrivatizableType() const override {
5356     return PrivatizableType;
5357   }
5358 
5359   const std::string getAsStr() const override {
5360     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5361   }
5362 
5363 protected:
5364   Optional<Type *> PrivatizableType;
5365 };
5366 
5367 // TODO: Do this for call site arguments (probably also other values) as well.
5368 
5369 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5370   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5371       : AAPrivatizablePtrImpl(IRP, A) {}
5372 
5373   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5374   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5375     // If this is a byval argument and we know all the call sites (so we can
5376     // rewrite them), there is no need to check them explicitly.
5377     bool AllCallSitesKnown;
5378     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5379         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5380                                true, AllCallSitesKnown))
5381       return getAssociatedValue().getType()->getPointerElementType();
5382 
5383     Optional<Type *> Ty;
5384     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5385 
5386     // Make sure the associated call site argument has the same type at all call
5387     // sites and it is an allocation we know is safe to privatize, for now that
5388     // means we only allow alloca instructions.
5389     // TODO: We can additionally analyze the accesses in the callee to  create
5390     //       the type from that information instead. That is a little more
5391     //       involved and will be done in a follow up patch.
5392     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5393       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
5394       // Check if a coresponding argument was found or if it is one not
5395       // associated (which can happen for callback calls).
5396       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5397         return false;
5398 
5399       // Check that all call sites agree on a type.
5400       auto &PrivCSArgAA =
5401           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5402       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5403 
5404       LLVM_DEBUG({
5405         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5406         if (CSTy.hasValue() && CSTy.getValue())
5407           CSTy.getValue()->print(dbgs());
5408         else if (CSTy.hasValue())
5409           dbgs() << "<nullptr>";
5410         else
5411           dbgs() << "<none>";
5412       });
5413 
5414       Ty = combineTypes(Ty, CSTy);
5415 
5416       LLVM_DEBUG({
5417         dbgs() << " : New Type: ";
5418         if (Ty.hasValue() && Ty.getValue())
5419           Ty.getValue()->print(dbgs());
5420         else if (Ty.hasValue())
5421           dbgs() << "<nullptr>";
5422         else
5423           dbgs() << "<none>";
5424         dbgs() << "\n";
5425       });
5426 
5427       return !Ty.hasValue() || Ty.getValue();
5428     };
5429 
5430     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5431       return nullptr;
5432     return Ty;
5433   }
5434 
5435   /// See AbstractAttribute::updateImpl(...).
5436   ChangeStatus updateImpl(Attributor &A) override {
5437     PrivatizableType = identifyPrivatizableType(A);
5438     if (!PrivatizableType.hasValue())
5439       return ChangeStatus::UNCHANGED;
5440     if (!PrivatizableType.getValue())
5441       return indicatePessimisticFixpoint();
5442 
5443     // The dependence is optional so we don't give up once we give up on the
5444     // alignment.
5445     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5446                         DepClassTy::OPTIONAL);
5447 
5448     // Avoid arguments with padding for now.
5449     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5450         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5451                                                 A.getInfoCache().getDL())) {
5452       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5453       return indicatePessimisticFixpoint();
5454     }
5455 
5456     // Verify callee and caller agree on how the promoted argument would be
5457     // passed.
5458     // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
5459     // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
5460     // which doesn't require the arguments ArgumentPromotion wanted to pass.
5461     Function &Fn = *getIRPosition().getAnchorScope();
5462     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5463     ArgsToPromote.insert(getAssociatedArgument());
5464     const auto *TTI =
5465         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5466     if (!TTI ||
5467         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5468             Fn, *TTI, ArgsToPromote, Dummy) ||
5469         ArgsToPromote.empty()) {
5470       LLVM_DEBUG(
5471           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5472                  << Fn.getName() << "\n");
5473       return indicatePessimisticFixpoint();
5474     }
5475 
5476     // Collect the types that will replace the privatizable type in the function
5477     // signature.
5478     SmallVector<Type *, 16> ReplacementTypes;
5479     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5480 
5481     // Register a rewrite of the argument.
5482     Argument *Arg = getAssociatedArgument();
5483     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5484       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5485       return indicatePessimisticFixpoint();
5486     }
5487 
5488     unsigned ArgNo = Arg->getArgNo();
5489 
5490     // Helper to check if for the given call site the associated argument is
5491     // passed to a callback where the privatization would be different.
5492     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5493       SmallVector<const Use *, 4> CallbackUses;
5494       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5495       for (const Use *U : CallbackUses) {
5496         AbstractCallSite CBACS(U);
5497         assert(CBACS && CBACS.isCallbackCall());
5498         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5499           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5500 
5501           LLVM_DEBUG({
5502             dbgs()
5503                 << "[AAPrivatizablePtr] Argument " << *Arg
5504                 << "check if can be privatized in the context of its parent ("
5505                 << Arg->getParent()->getName()
5506                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5507                    "callback ("
5508                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5509                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5510                 << CBACS.getCallArgOperand(CBArg) << " vs "
5511                 << CB.getArgOperand(ArgNo) << "\n"
5512                 << "[AAPrivatizablePtr] " << CBArg << " : "
5513                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5514           });
5515 
5516           if (CBArgNo != int(ArgNo))
5517             continue;
5518           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5519               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5520           if (CBArgPrivAA.isValidState()) {
5521             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5522             if (!CBArgPrivTy.hasValue())
5523               continue;
5524             if (CBArgPrivTy.getValue() == PrivatizableType)
5525               continue;
5526           }
5527 
5528           LLVM_DEBUG({
5529             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5530                    << " cannot be privatized in the context of its parent ("
5531                    << Arg->getParent()->getName()
5532                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5533                       "callback ("
5534                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5535                    << ").\n[AAPrivatizablePtr] for which the argument "
5536                       "privatization is not compatible.\n";
5537           });
5538           return false;
5539         }
5540       }
5541       return true;
5542     };
5543 
5544     // Helper to check if for the given call site the associated argument is
5545     // passed to a direct call where the privatization would be different.
5546     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5547       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5548       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5549       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5550              "Expected a direct call operand for callback call operand");
5551 
5552       LLVM_DEBUG({
5553         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5554                << " check if be privatized in the context of its parent ("
5555                << Arg->getParent()->getName()
5556                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5557                   "direct call of ("
5558                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5559                << ").\n";
5560       });
5561 
5562       Function *DCCallee = DC->getCalledFunction();
5563       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5564         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5565             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5566             DepClassTy::REQUIRED);
5567         if (DCArgPrivAA.isValidState()) {
5568           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5569           if (!DCArgPrivTy.hasValue())
5570             return true;
5571           if (DCArgPrivTy.getValue() == PrivatizableType)
5572             return true;
5573         }
5574       }
5575 
5576       LLVM_DEBUG({
5577         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5578                << " cannot be privatized in the context of its parent ("
5579                << Arg->getParent()->getName()
5580                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5581                   "direct call of ("
5582                << ACS.getInstruction()->getCalledFunction()->getName()
5583                << ").\n[AAPrivatizablePtr] for which the argument "
5584                   "privatization is not compatible.\n";
5585       });
5586       return false;
5587     };
5588 
5589     // Helper to check if the associated argument is used at the given abstract
5590     // call site in a way that is incompatible with the privatization assumed
5591     // here.
5592     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5593       if (ACS.isDirectCall())
5594         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5595       if (ACS.isCallbackCall())
5596         return IsCompatiblePrivArgOfDirectCS(ACS);
5597       return false;
5598     };
5599 
5600     bool AllCallSitesKnown;
5601     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5602                                 AllCallSitesKnown))
5603       return indicatePessimisticFixpoint();
5604 
5605     return ChangeStatus::UNCHANGED;
5606   }
5607 
5608   /// Given a type to private \p PrivType, collect the constituates (which are
5609   /// used) in \p ReplacementTypes.
5610   static void
5611   identifyReplacementTypes(Type *PrivType,
5612                            SmallVectorImpl<Type *> &ReplacementTypes) {
5613     // TODO: For now we expand the privatization type to the fullest which can
5614     //       lead to dead arguments that need to be removed later.
5615     assert(PrivType && "Expected privatizable type!");
5616 
5617     // Traverse the type, extract constituate types on the outermost level.
5618     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5619       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5620         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5621     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5622       ReplacementTypes.append(PrivArrayType->getNumElements(),
5623                               PrivArrayType->getElementType());
5624     } else {
5625       ReplacementTypes.push_back(PrivType);
5626     }
5627   }
5628 
5629   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5630   /// The values needed are taken from the arguments of \p F starting at
5631   /// position \p ArgNo.
5632   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5633                                    unsigned ArgNo, Instruction &IP) {
5634     assert(PrivType && "Expected privatizable type!");
5635 
5636     IRBuilder<NoFolder> IRB(&IP);
5637     const DataLayout &DL = F.getParent()->getDataLayout();
5638 
5639     // Traverse the type, build GEPs and stores.
5640     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5641       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5642       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5643         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5644         Value *Ptr =
5645             constructPointer(PointeeTy, PrivType, &Base,
5646                              PrivStructLayout->getElementOffset(u), IRB, DL);
5647         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5648       }
5649     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5650       Type *PointeeTy = PrivArrayType->getElementType();
5651       Type *PointeePtrTy = PointeeTy->getPointerTo();
5652       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5653       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5654         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
5655                                       u * PointeeTySize, IRB, DL);
5656         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5657       }
5658     } else {
5659       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5660     }
5661   }
5662 
5663   /// Extract values from \p Base according to the type \p PrivType at the
5664   /// call position \p ACS. The values are appended to \p ReplacementValues.
5665   void createReplacementValues(Align Alignment, Type *PrivType,
5666                                AbstractCallSite ACS, Value *Base,
5667                                SmallVectorImpl<Value *> &ReplacementValues) {
5668     assert(Base && "Expected base value!");
5669     assert(PrivType && "Expected privatizable type!");
5670     Instruction *IP = ACS.getInstruction();
5671 
5672     IRBuilder<NoFolder> IRB(IP);
5673     const DataLayout &DL = IP->getModule()->getDataLayout();
5674 
5675     if (Base->getType()->getPointerElementType() != PrivType)
5676       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5677                                                  "", ACS.getInstruction());
5678 
5679     // Traverse the type, build GEPs and loads.
5680     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5681       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5682       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5683         Type *PointeeTy = PrivStructType->getElementType(u);
5684         Value *Ptr =
5685             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
5686                              PrivStructLayout->getElementOffset(u), IRB, DL);
5687         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5688         L->setAlignment(Alignment);
5689         ReplacementValues.push_back(L);
5690       }
5691     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5692       Type *PointeeTy = PrivArrayType->getElementType();
5693       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5694       Type *PointeePtrTy = PointeeTy->getPointerTo();
5695       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5696         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
5697                                       u * PointeeTySize, IRB, DL);
5698         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5699         L->setAlignment(Alignment);
5700         ReplacementValues.push_back(L);
5701       }
5702     } else {
5703       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5704       L->setAlignment(Alignment);
5705       ReplacementValues.push_back(L);
5706     }
5707   }
5708 
5709   /// See AbstractAttribute::manifest(...)
5710   ChangeStatus manifest(Attributor &A) override {
5711     if (!PrivatizableType.hasValue())
5712       return ChangeStatus::UNCHANGED;
5713     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5714 
5715     // Collect all tail calls in the function as we cannot allow new allocas to
5716     // escape into tail recursion.
5717     // TODO: Be smarter about new allocas escaping into tail calls.
5718     SmallVector<CallInst *, 16> TailCalls;
5719     if (!A.checkForAllInstructions(
5720             [&](Instruction &I) {
5721               CallInst &CI = cast<CallInst>(I);
5722               if (CI.isTailCall())
5723                 TailCalls.push_back(&CI);
5724               return true;
5725             },
5726             *this, {Instruction::Call}))
5727       return ChangeStatus::UNCHANGED;
5728 
5729     Argument *Arg = getAssociatedArgument();
5730     // Query AAAlign attribute for alignment of associated argument to
5731     // determine the best alignment of loads.
5732     const auto &AlignAA =
5733         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5734 
5735     // Callback to repair the associated function. A new alloca is placed at the
5736     // beginning and initialized with the values passed through arguments. The
5737     // new alloca replaces the use of the old pointer argument.
5738     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5739         [=](const Attributor::ArgumentReplacementInfo &ARI,
5740             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5741           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5742           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5743           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5744                                            Arg->getName() + ".priv", IP);
5745           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5746                                ArgIt->getArgNo(), *IP);
5747 
5748           if (AI->getType() != Arg->getType())
5749             AI =
5750                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5751           Arg->replaceAllUsesWith(AI);
5752 
5753           for (CallInst *CI : TailCalls)
5754             CI->setTailCall(false);
5755         };
5756 
5757     // Callback to repair a call site of the associated function. The elements
5758     // of the privatizable type are loaded prior to the call and passed to the
5759     // new function version.
5760     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5761         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5762                       AbstractCallSite ACS,
5763                       SmallVectorImpl<Value *> &NewArgOperands) {
5764           // When no alignment is specified for the load instruction,
5765           // natural alignment is assumed.
5766           createReplacementValues(
5767               assumeAligned(AlignAA.getAssumedAlign()),
5768               PrivatizableType.getValue(), ACS,
5769               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5770               NewArgOperands);
5771         };
5772 
5773     // Collect the types that will replace the privatizable type in the function
5774     // signature.
5775     SmallVector<Type *, 16> ReplacementTypes;
5776     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5777 
5778     // Register a rewrite of the argument.
5779     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5780                                            std::move(FnRepairCB),
5781                                            std::move(ACSRepairCB)))
5782       return ChangeStatus::CHANGED;
5783     return ChangeStatus::UNCHANGED;
5784   }
5785 
5786   /// See AbstractAttribute::trackStatistics()
5787   void trackStatistics() const override {
5788     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5789   }
5790 };
5791 
5792 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5793   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5794       : AAPrivatizablePtrImpl(IRP, A) {}
5795 
5796   /// See AbstractAttribute::initialize(...).
5797   virtual void initialize(Attributor &A) override {
5798     // TODO: We can privatize more than arguments.
5799     indicatePessimisticFixpoint();
5800   }
5801 
5802   ChangeStatus updateImpl(Attributor &A) override {
5803     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5804                      "updateImpl will not be called");
5805   }
5806 
5807   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5808   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5809     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5810     if (!Obj) {
5811       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5812       return nullptr;
5813     }
5814 
5815     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5816       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5817         if (CI->isOne())
5818           return Obj->getType()->getPointerElementType();
5819     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5820       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5821           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5822       if (PrivArgAA.isAssumedPrivatizablePtr())
5823         return Obj->getType()->getPointerElementType();
5824     }
5825 
5826     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5827                          "alloca nor privatizable argument: "
5828                       << *Obj << "!\n");
5829     return nullptr;
5830   }
5831 
5832   /// See AbstractAttribute::trackStatistics()
5833   void trackStatistics() const override {
5834     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5835   }
5836 };
5837 
5838 struct AAPrivatizablePtrCallSiteArgument final
5839     : public AAPrivatizablePtrFloating {
5840   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5841       : AAPrivatizablePtrFloating(IRP, A) {}
5842 
5843   /// See AbstractAttribute::initialize(...).
5844   void initialize(Attributor &A) override {
5845     if (getIRPosition().hasAttr(Attribute::ByVal))
5846       indicateOptimisticFixpoint();
5847   }
5848 
5849   /// See AbstractAttribute::updateImpl(...).
5850   ChangeStatus updateImpl(Attributor &A) override {
5851     PrivatizableType = identifyPrivatizableType(A);
5852     if (!PrivatizableType.hasValue())
5853       return ChangeStatus::UNCHANGED;
5854     if (!PrivatizableType.getValue())
5855       return indicatePessimisticFixpoint();
5856 
5857     const IRPosition &IRP = getIRPosition();
5858     auto &NoCaptureAA =
5859         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5860     if (!NoCaptureAA.isAssumedNoCapture()) {
5861       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5862       return indicatePessimisticFixpoint();
5863     }
5864 
5865     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5866     if (!NoAliasAA.isAssumedNoAlias()) {
5867       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5868       return indicatePessimisticFixpoint();
5869     }
5870 
5871     const auto &MemBehaviorAA =
5872         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5873     if (!MemBehaviorAA.isAssumedReadOnly()) {
5874       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5875       return indicatePessimisticFixpoint();
5876     }
5877 
5878     return ChangeStatus::UNCHANGED;
5879   }
5880 
5881   /// See AbstractAttribute::trackStatistics()
5882   void trackStatistics() const override {
5883     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5884   }
5885 };
5886 
5887 struct AAPrivatizablePtrCallSiteReturned final
5888     : public AAPrivatizablePtrFloating {
5889   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5890       : AAPrivatizablePtrFloating(IRP, A) {}
5891 
5892   /// See AbstractAttribute::initialize(...).
5893   void initialize(Attributor &A) override {
5894     // TODO: We can privatize more than arguments.
5895     indicatePessimisticFixpoint();
5896   }
5897 
5898   /// See AbstractAttribute::trackStatistics()
5899   void trackStatistics() const override {
5900     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5901   }
5902 };
5903 
5904 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5905   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5906       : AAPrivatizablePtrFloating(IRP, A) {}
5907 
5908   /// See AbstractAttribute::initialize(...).
5909   void initialize(Attributor &A) override {
5910     // TODO: We can privatize more than arguments.
5911     indicatePessimisticFixpoint();
5912   }
5913 
5914   /// See AbstractAttribute::trackStatistics()
5915   void trackStatistics() const override {
5916     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5917   }
5918 };
5919 
5920 /// -------------------- Memory Behavior Attributes ----------------------------
5921 /// Includes read-none, read-only, and write-only.
5922 /// ----------------------------------------------------------------------------
5923 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5924   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5925       : AAMemoryBehavior(IRP, A) {}
5926 
5927   /// See AbstractAttribute::initialize(...).
5928   void initialize(Attributor &A) override {
5929     intersectAssumedBits(BEST_STATE);
5930     getKnownStateFromValue(getIRPosition(), getState());
5931     AAMemoryBehavior::initialize(A);
5932   }
5933 
5934   /// Return the memory behavior information encoded in the IR for \p IRP.
5935   static void getKnownStateFromValue(const IRPosition &IRP,
5936                                      BitIntegerState &State,
5937                                      bool IgnoreSubsumingPositions = false) {
5938     SmallVector<Attribute, 2> Attrs;
5939     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5940     for (const Attribute &Attr : Attrs) {
5941       switch (Attr.getKindAsEnum()) {
5942       case Attribute::ReadNone:
5943         State.addKnownBits(NO_ACCESSES);
5944         break;
5945       case Attribute::ReadOnly:
5946         State.addKnownBits(NO_WRITES);
5947         break;
5948       case Attribute::WriteOnly:
5949         State.addKnownBits(NO_READS);
5950         break;
5951       default:
5952         llvm_unreachable("Unexpected attribute!");
5953       }
5954     }
5955 
5956     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5957       if (!I->mayReadFromMemory())
5958         State.addKnownBits(NO_READS);
5959       if (!I->mayWriteToMemory())
5960         State.addKnownBits(NO_WRITES);
5961     }
5962   }
5963 
5964   /// See AbstractAttribute::getDeducedAttributes(...).
5965   void getDeducedAttributes(LLVMContext &Ctx,
5966                             SmallVectorImpl<Attribute> &Attrs) const override {
5967     assert(Attrs.size() == 0);
5968     if (isAssumedReadNone())
5969       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5970     else if (isAssumedReadOnly())
5971       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5972     else if (isAssumedWriteOnly())
5973       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5974     assert(Attrs.size() <= 1);
5975   }
5976 
5977   /// See AbstractAttribute::manifest(...).
5978   ChangeStatus manifest(Attributor &A) override {
5979     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5980       return ChangeStatus::UNCHANGED;
5981 
5982     const IRPosition &IRP = getIRPosition();
5983 
5984     // Check if we would improve the existing attributes first.
5985     SmallVector<Attribute, 4> DeducedAttrs;
5986     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5987     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5988           return IRP.hasAttr(Attr.getKindAsEnum(),
5989                              /* IgnoreSubsumingPositions */ true);
5990         }))
5991       return ChangeStatus::UNCHANGED;
5992 
5993     // Clear existing attributes.
5994     IRP.removeAttrs(AttrKinds);
5995 
5996     // Use the generic manifest method.
5997     return IRAttribute::manifest(A);
5998   }
5999 
6000   /// See AbstractState::getAsStr().
6001   const std::string getAsStr() const override {
6002     if (isAssumedReadNone())
6003       return "readnone";
6004     if (isAssumedReadOnly())
6005       return "readonly";
6006     if (isAssumedWriteOnly())
6007       return "writeonly";
6008     return "may-read/write";
6009   }
6010 
6011   /// The set of IR attributes AAMemoryBehavior deals with.
6012   static const Attribute::AttrKind AttrKinds[3];
6013 };
6014 
6015 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
6016     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
6017 
6018 /// Memory behavior attribute for a floating value.
6019 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
6020   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
6021       : AAMemoryBehaviorImpl(IRP, A) {}
6022 
6023   /// See AbstractAttribute::initialize(...).
6024   void initialize(Attributor &A) override {
6025     AAMemoryBehaviorImpl::initialize(A);
6026     addUsesOf(A, getAssociatedValue());
6027   }
6028 
6029   /// See AbstractAttribute::updateImpl(...).
6030   ChangeStatus updateImpl(Attributor &A) override;
6031 
6032   /// See AbstractAttribute::trackStatistics()
6033   void trackStatistics() const override {
6034     if (isAssumedReadNone())
6035       STATS_DECLTRACK_FLOATING_ATTR(readnone)
6036     else if (isAssumedReadOnly())
6037       STATS_DECLTRACK_FLOATING_ATTR(readonly)
6038     else if (isAssumedWriteOnly())
6039       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
6040   }
6041 
6042 private:
6043   /// Return true if users of \p UserI might access the underlying
6044   /// variable/location described by \p U and should therefore be analyzed.
6045   bool followUsersOfUseIn(Attributor &A, const Use *U,
6046                           const Instruction *UserI);
6047 
6048   /// Update the state according to the effect of use \p U in \p UserI.
6049   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
6050 
6051 protected:
6052   /// Add the uses of \p V to the `Uses` set we look at during the update step.
6053   void addUsesOf(Attributor &A, const Value &V);
6054 
6055   /// Container for (transitive) uses of the associated argument.
6056   SmallVector<const Use *, 8> Uses;
6057 
6058   /// Set to remember the uses we already traversed.
6059   SmallPtrSet<const Use *, 8> Visited;
6060 };
6061 
6062 /// Memory behavior attribute for function argument.
6063 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6064   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6065       : AAMemoryBehaviorFloating(IRP, A) {}
6066 
6067   /// See AbstractAttribute::initialize(...).
6068   void initialize(Attributor &A) override {
6069     intersectAssumedBits(BEST_STATE);
6070     const IRPosition &IRP = getIRPosition();
6071     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6072     // can query it when we use has/getAttr. That would allow us to reuse the
6073     // initialize of the base class here.
6074     bool HasByVal =
6075         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6076     getKnownStateFromValue(IRP, getState(),
6077                            /* IgnoreSubsumingPositions */ HasByVal);
6078 
6079     // Initialize the use vector with all direct uses of the associated value.
6080     Argument *Arg = getAssociatedArgument();
6081     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
6082       indicatePessimisticFixpoint();
6083     } else {
6084       addUsesOf(A, *Arg);
6085     }
6086   }
6087 
6088   ChangeStatus manifest(Attributor &A) override {
6089     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6090     if (!getAssociatedValue().getType()->isPointerTy())
6091       return ChangeStatus::UNCHANGED;
6092 
6093     // TODO: From readattrs.ll: "inalloca parameters are always
6094     //                           considered written"
6095     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6096       removeKnownBits(NO_WRITES);
6097       removeAssumedBits(NO_WRITES);
6098     }
6099     return AAMemoryBehaviorFloating::manifest(A);
6100   }
6101 
6102   /// See AbstractAttribute::trackStatistics()
6103   void trackStatistics() const override {
6104     if (isAssumedReadNone())
6105       STATS_DECLTRACK_ARG_ATTR(readnone)
6106     else if (isAssumedReadOnly())
6107       STATS_DECLTRACK_ARG_ATTR(readonly)
6108     else if (isAssumedWriteOnly())
6109       STATS_DECLTRACK_ARG_ATTR(writeonly)
6110   }
6111 };
6112 
6113 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6114   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6115       : AAMemoryBehaviorArgument(IRP, A) {}
6116 
6117   /// See AbstractAttribute::initialize(...).
6118   void initialize(Attributor &A) override {
6119     // If we don't have an associated attribute this is either a variadic call
6120     // or an indirect call, either way, nothing to do here.
6121     Argument *Arg = getAssociatedArgument();
6122     if (!Arg) {
6123       indicatePessimisticFixpoint();
6124       return;
6125     }
6126     if (Arg->hasByValAttr()) {
6127       addKnownBits(NO_WRITES);
6128       removeKnownBits(NO_READS);
6129       removeAssumedBits(NO_READS);
6130     }
6131     AAMemoryBehaviorArgument::initialize(A);
6132     if (getAssociatedFunction()->isDeclaration())
6133       indicatePessimisticFixpoint();
6134   }
6135 
6136   /// See AbstractAttribute::updateImpl(...).
6137   ChangeStatus updateImpl(Attributor &A) override {
6138     // TODO: Once we have call site specific value information we can provide
6139     //       call site specific liveness liveness information and then it makes
6140     //       sense to specialize attributes for call sites arguments instead of
6141     //       redirecting requests to the callee argument.
6142     Argument *Arg = getAssociatedArgument();
6143     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6144     auto &ArgAA =
6145         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6146     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6147   }
6148 
6149   /// See AbstractAttribute::trackStatistics()
6150   void trackStatistics() const override {
6151     if (isAssumedReadNone())
6152       STATS_DECLTRACK_CSARG_ATTR(readnone)
6153     else if (isAssumedReadOnly())
6154       STATS_DECLTRACK_CSARG_ATTR(readonly)
6155     else if (isAssumedWriteOnly())
6156       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6157   }
6158 };
6159 
6160 /// Memory behavior attribute for a call site return position.
6161 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6162   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6163       : AAMemoryBehaviorFloating(IRP, A) {}
6164 
6165   /// See AbstractAttribute::initialize(...).
6166   void initialize(Attributor &A) override {
6167     AAMemoryBehaviorImpl::initialize(A);
6168     Function *F = getAssociatedFunction();
6169     if (!F || F->isDeclaration())
6170       indicatePessimisticFixpoint();
6171   }
6172 
6173   /// See AbstractAttribute::manifest(...).
6174   ChangeStatus manifest(Attributor &A) override {
6175     // We do not annotate returned values.
6176     return ChangeStatus::UNCHANGED;
6177   }
6178 
6179   /// See AbstractAttribute::trackStatistics()
6180   void trackStatistics() const override {}
6181 };
6182 
6183 /// An AA to represent the memory behavior function attributes.
6184 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6185   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6186       : AAMemoryBehaviorImpl(IRP, A) {}
6187 
6188   /// See AbstractAttribute::updateImpl(Attributor &A).
6189   virtual ChangeStatus updateImpl(Attributor &A) override;
6190 
6191   /// See AbstractAttribute::manifest(...).
6192   ChangeStatus manifest(Attributor &A) override {
6193     Function &F = cast<Function>(getAnchorValue());
6194     if (isAssumedReadNone()) {
6195       F.removeFnAttr(Attribute::ArgMemOnly);
6196       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6197       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6198     }
6199     return AAMemoryBehaviorImpl::manifest(A);
6200   }
6201 
6202   /// See AbstractAttribute::trackStatistics()
6203   void trackStatistics() const override {
6204     if (isAssumedReadNone())
6205       STATS_DECLTRACK_FN_ATTR(readnone)
6206     else if (isAssumedReadOnly())
6207       STATS_DECLTRACK_FN_ATTR(readonly)
6208     else if (isAssumedWriteOnly())
6209       STATS_DECLTRACK_FN_ATTR(writeonly)
6210   }
6211 };
6212 
6213 /// AAMemoryBehavior attribute for call sites.
6214 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6215   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6216       : AAMemoryBehaviorImpl(IRP, A) {}
6217 
6218   /// See AbstractAttribute::initialize(...).
6219   void initialize(Attributor &A) override {
6220     AAMemoryBehaviorImpl::initialize(A);
6221     Function *F = getAssociatedFunction();
6222     if (!F || F->isDeclaration())
6223       indicatePessimisticFixpoint();
6224   }
6225 
6226   /// See AbstractAttribute::updateImpl(...).
6227   ChangeStatus updateImpl(Attributor &A) override {
6228     // TODO: Once we have call site specific value information we can provide
6229     //       call site specific liveness liveness information and then it makes
6230     //       sense to specialize attributes for call sites arguments instead of
6231     //       redirecting requests to the callee argument.
6232     Function *F = getAssociatedFunction();
6233     const IRPosition &FnPos = IRPosition::function(*F);
6234     auto &FnAA =
6235         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6236     return clampStateAndIndicateChange(getState(), FnAA.getState());
6237   }
6238 
6239   /// See AbstractAttribute::trackStatistics()
6240   void trackStatistics() const override {
6241     if (isAssumedReadNone())
6242       STATS_DECLTRACK_CS_ATTR(readnone)
6243     else if (isAssumedReadOnly())
6244       STATS_DECLTRACK_CS_ATTR(readonly)
6245     else if (isAssumedWriteOnly())
6246       STATS_DECLTRACK_CS_ATTR(writeonly)
6247   }
6248 };
6249 
6250 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6251 
6252   // The current assumed state used to determine a change.
6253   auto AssumedState = getAssumed();
6254 
6255   auto CheckRWInst = [&](Instruction &I) {
6256     // If the instruction has an own memory behavior state, use it to restrict
6257     // the local state. No further analysis is required as the other memory
6258     // state is as optimistic as it gets.
6259     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6260       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6261           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6262       intersectAssumedBits(MemBehaviorAA.getAssumed());
6263       return !isAtFixpoint();
6264     }
6265 
6266     // Remove access kind modifiers if necessary.
6267     if (I.mayReadFromMemory())
6268       removeAssumedBits(NO_READS);
6269     if (I.mayWriteToMemory())
6270       removeAssumedBits(NO_WRITES);
6271     return !isAtFixpoint();
6272   };
6273 
6274   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6275     return indicatePessimisticFixpoint();
6276 
6277   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6278                                         : ChangeStatus::UNCHANGED;
6279 }
6280 
6281 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6282 
6283   const IRPosition &IRP = getIRPosition();
6284   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6285   AAMemoryBehavior::StateType &S = getState();
6286 
6287   // First, check the function scope. We take the known information and we avoid
6288   // work if the assumed information implies the current assumed information for
6289   // this attribute. This is a valid for all but byval arguments.
6290   Argument *Arg = IRP.getAssociatedArgument();
6291   AAMemoryBehavior::base_t FnMemAssumedState =
6292       AAMemoryBehavior::StateType::getWorstState();
6293   if (!Arg || !Arg->hasByValAttr()) {
6294     const auto &FnMemAA =
6295         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6296     FnMemAssumedState = FnMemAA.getAssumed();
6297     S.addKnownBits(FnMemAA.getKnown());
6298     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6299       return ChangeStatus::UNCHANGED;
6300   }
6301 
6302   // Make sure the value is not captured (except through "return"), if
6303   // it is, any information derived would be irrelevant anyway as we cannot
6304   // check the potential aliases introduced by the capture. However, no need
6305   // to fall back to anythign less optimistic than the function state.
6306   const auto &ArgNoCaptureAA =
6307       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6308   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6309     S.intersectAssumedBits(FnMemAssumedState);
6310     return ChangeStatus::CHANGED;
6311   }
6312 
6313   // The current assumed state used to determine a change.
6314   auto AssumedState = S.getAssumed();
6315 
6316   // Liveness information to exclude dead users.
6317   // TODO: Take the FnPos once we have call site specific liveness information.
6318   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6319       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6320       DepClassTy::NONE);
6321 
6322   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6323   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6324     const Use *U = Uses[i];
6325     Instruction *UserI = cast<Instruction>(U->getUser());
6326     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6327                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6328                       << "]\n");
6329     if (A.isAssumedDead(*U, this, &LivenessAA))
6330       continue;
6331 
6332     // Droppable users, e.g., llvm::assume does not actually perform any action.
6333     if (UserI->isDroppable())
6334       continue;
6335 
6336     // Check if the users of UserI should also be visited.
6337     if (followUsersOfUseIn(A, U, UserI))
6338       addUsesOf(A, *UserI);
6339 
6340     // If UserI might touch memory we analyze the use in detail.
6341     if (UserI->mayReadOrWriteMemory())
6342       analyzeUseIn(A, U, UserI);
6343   }
6344 
6345   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6346                                         : ChangeStatus::UNCHANGED;
6347 }
6348 
6349 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6350   SmallVector<const Use *, 8> WL;
6351   for (const Use &U : V.uses())
6352     WL.push_back(&U);
6353 
6354   while (!WL.empty()) {
6355     const Use *U = WL.pop_back_val();
6356     if (!Visited.insert(U).second)
6357       continue;
6358 
6359     const Instruction *UserI = cast<Instruction>(U->getUser());
6360     if (UserI->mayReadOrWriteMemory()) {
6361       Uses.push_back(U);
6362       continue;
6363     }
6364     if (!followUsersOfUseIn(A, U, UserI))
6365       continue;
6366     for (const Use &UU : UserI->uses())
6367       WL.push_back(&UU);
6368   }
6369 }
6370 
6371 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6372                                                   const Instruction *UserI) {
6373   // The loaded value is unrelated to the pointer argument, no need to
6374   // follow the users of the load.
6375   if (isa<LoadInst>(UserI))
6376     return false;
6377 
6378   // By default we follow all uses assuming UserI might leak information on U,
6379   // we have special handling for call sites operands though.
6380   const auto *CB = dyn_cast<CallBase>(UserI);
6381   if (!CB || !CB->isArgOperand(U))
6382     return true;
6383 
6384   // If the use is a call argument known not to be captured, the users of
6385   // the call do not need to be visited because they have to be unrelated to
6386   // the input. Note that this check is not trivial even though we disallow
6387   // general capturing of the underlying argument. The reason is that the
6388   // call might the argument "through return", which we allow and for which we
6389   // need to check call users.
6390   if (U->get()->getType()->isPointerTy()) {
6391     unsigned ArgNo = CB->getArgOperandNo(U);
6392     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6393         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6394     return !ArgNoCaptureAA.isAssumedNoCapture();
6395   }
6396 
6397   return true;
6398 }
6399 
6400 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6401                                             const Instruction *UserI) {
6402   assert(UserI->mayReadOrWriteMemory());
6403 
6404   switch (UserI->getOpcode()) {
6405   default:
6406     // TODO: Handle all atomics and other side-effect operations we know of.
6407     break;
6408   case Instruction::Load:
6409     // Loads cause the NO_READS property to disappear.
6410     removeAssumedBits(NO_READS);
6411     return;
6412 
6413   case Instruction::Store:
6414     // Stores cause the NO_WRITES property to disappear if the use is the
6415     // pointer operand. Note that we do assume that capturing was taken care of
6416     // somewhere else.
6417     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6418       removeAssumedBits(NO_WRITES);
6419     return;
6420 
6421   case Instruction::Call:
6422   case Instruction::CallBr:
6423   case Instruction::Invoke: {
6424     // For call sites we look at the argument memory behavior attribute (this
6425     // could be recursive!) in order to restrict our own state.
6426     const auto *CB = cast<CallBase>(UserI);
6427 
6428     // Give up on operand bundles.
6429     if (CB->isBundleOperand(U)) {
6430       indicatePessimisticFixpoint();
6431       return;
6432     }
6433 
6434     // Calling a function does read the function pointer, maybe write it if the
6435     // function is self-modifying.
6436     if (CB->isCallee(U)) {
6437       removeAssumedBits(NO_READS);
6438       break;
6439     }
6440 
6441     // Adjust the possible access behavior based on the information on the
6442     // argument.
6443     IRPosition Pos;
6444     if (U->get()->getType()->isPointerTy())
6445       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6446     else
6447       Pos = IRPosition::callsite_function(*CB);
6448     const auto &MemBehaviorAA =
6449         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6450     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6451     // and at least "known".
6452     intersectAssumedBits(MemBehaviorAA.getAssumed());
6453     return;
6454   }
6455   };
6456 
6457   // Generally, look at the "may-properties" and adjust the assumed state if we
6458   // did not trigger special handling before.
6459   if (UserI->mayReadFromMemory())
6460     removeAssumedBits(NO_READS);
6461   if (UserI->mayWriteToMemory())
6462     removeAssumedBits(NO_WRITES);
6463 }
6464 
6465 } // namespace
6466 
6467 /// -------------------- Memory Locations Attributes ---------------------------
6468 /// Includes read-none, argmemonly, inaccessiblememonly,
6469 /// inaccessiblememorargmemonly
6470 /// ----------------------------------------------------------------------------
6471 
6472 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6473     AAMemoryLocation::MemoryLocationsKind MLK) {
6474   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6475     return "all memory";
6476   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6477     return "no memory";
6478   std::string S = "memory:";
6479   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6480     S += "stack,";
6481   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6482     S += "constant,";
6483   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6484     S += "internal global,";
6485   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6486     S += "external global,";
6487   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6488     S += "argument,";
6489   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6490     S += "inaccessible,";
6491   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6492     S += "malloced,";
6493   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6494     S += "unknown,";
6495   S.pop_back();
6496   return S;
6497 }
6498 
6499 namespace {
6500 struct AAMemoryLocationImpl : public AAMemoryLocation {
6501 
6502   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6503       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6504     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6505       AccessKind2Accesses[u] = nullptr;
6506   }
6507 
6508   ~AAMemoryLocationImpl() {
6509     // The AccessSets are allocated via a BumpPtrAllocator, we call
6510     // the destructor manually.
6511     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6512       if (AccessKind2Accesses[u])
6513         AccessKind2Accesses[u]->~AccessSet();
6514   }
6515 
6516   /// See AbstractAttribute::initialize(...).
6517   void initialize(Attributor &A) override {
6518     intersectAssumedBits(BEST_STATE);
6519     getKnownStateFromValue(A, getIRPosition(), getState());
6520     AAMemoryLocation::initialize(A);
6521   }
6522 
6523   /// Return the memory behavior information encoded in the IR for \p IRP.
6524   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6525                                      BitIntegerState &State,
6526                                      bool IgnoreSubsumingPositions = false) {
6527     // For internal functions we ignore `argmemonly` and
6528     // `inaccessiblememorargmemonly` as we might break it via interprocedural
6529     // constant propagation. It is unclear if this is the best way but it is
6530     // unlikely this will cause real performance problems. If we are deriving
6531     // attributes for the anchor function we even remove the attribute in
6532     // addition to ignoring it.
6533     bool UseArgMemOnly = true;
6534     Function *AnchorFn = IRP.getAnchorScope();
6535     if (AnchorFn && A.isRunOn(*AnchorFn))
6536       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6537 
6538     SmallVector<Attribute, 2> Attrs;
6539     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6540     for (const Attribute &Attr : Attrs) {
6541       switch (Attr.getKindAsEnum()) {
6542       case Attribute::ReadNone:
6543         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6544         break;
6545       case Attribute::InaccessibleMemOnly:
6546         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6547         break;
6548       case Attribute::ArgMemOnly:
6549         if (UseArgMemOnly)
6550           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6551         else
6552           IRP.removeAttrs({Attribute::ArgMemOnly});
6553         break;
6554       case Attribute::InaccessibleMemOrArgMemOnly:
6555         if (UseArgMemOnly)
6556           State.addKnownBits(inverseLocation(
6557               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6558         else
6559           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6560         break;
6561       default:
6562         llvm_unreachable("Unexpected attribute!");
6563       }
6564     }
6565   }
6566 
6567   /// See AbstractAttribute::getDeducedAttributes(...).
6568   void getDeducedAttributes(LLVMContext &Ctx,
6569                             SmallVectorImpl<Attribute> &Attrs) const override {
6570     assert(Attrs.size() == 0);
6571     if (isAssumedReadNone()) {
6572       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6573     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6574       if (isAssumedInaccessibleMemOnly())
6575         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6576       else if (isAssumedArgMemOnly())
6577         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6578       else if (isAssumedInaccessibleOrArgMemOnly())
6579         Attrs.push_back(
6580             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6581     }
6582     assert(Attrs.size() <= 1);
6583   }
6584 
6585   /// See AbstractAttribute::manifest(...).
6586   ChangeStatus manifest(Attributor &A) override {
6587     const IRPosition &IRP = getIRPosition();
6588 
6589     // Check if we would improve the existing attributes first.
6590     SmallVector<Attribute, 4> DeducedAttrs;
6591     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6592     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6593           return IRP.hasAttr(Attr.getKindAsEnum(),
6594                              /* IgnoreSubsumingPositions */ true);
6595         }))
6596       return ChangeStatus::UNCHANGED;
6597 
6598     // Clear existing attributes.
6599     IRP.removeAttrs(AttrKinds);
6600     if (isAssumedReadNone())
6601       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6602 
6603     // Use the generic manifest method.
6604     return IRAttribute::manifest(A);
6605   }
6606 
6607   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6608   bool checkForAllAccessesToMemoryKind(
6609       function_ref<bool(const Instruction *, const Value *, AccessKind,
6610                         MemoryLocationsKind)>
6611           Pred,
6612       MemoryLocationsKind RequestedMLK) const override {
6613     if (!isValidState())
6614       return false;
6615 
6616     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6617     if (AssumedMLK == NO_LOCATIONS)
6618       return true;
6619 
6620     unsigned Idx = 0;
6621     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6622          CurMLK *= 2, ++Idx) {
6623       if (CurMLK & RequestedMLK)
6624         continue;
6625 
6626       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6627         for (const AccessInfo &AI : *Accesses)
6628           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6629             return false;
6630     }
6631 
6632     return true;
6633   }
6634 
6635   ChangeStatus indicatePessimisticFixpoint() override {
6636     // If we give up and indicate a pessimistic fixpoint this instruction will
6637     // become an access for all potential access kinds:
6638     // TODO: Add pointers for argmemonly and globals to improve the results of
6639     //       checkForAllAccessesToMemoryKind.
6640     bool Changed = false;
6641     MemoryLocationsKind KnownMLK = getKnown();
6642     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6643     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6644       if (!(CurMLK & KnownMLK))
6645         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6646                                   getAccessKindFromInst(I));
6647     return AAMemoryLocation::indicatePessimisticFixpoint();
6648   }
6649 
6650 protected:
6651   /// Helper struct to tie together an instruction that has a read or write
6652   /// effect with the pointer it accesses (if any).
6653   struct AccessInfo {
6654 
6655     /// The instruction that caused the access.
6656     const Instruction *I;
6657 
6658     /// The base pointer that is accessed, or null if unknown.
6659     const Value *Ptr;
6660 
6661     /// The kind of access (read/write/read+write).
6662     AccessKind Kind;
6663 
6664     bool operator==(const AccessInfo &RHS) const {
6665       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6666     }
6667     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6668       if (LHS.I != RHS.I)
6669         return LHS.I < RHS.I;
6670       if (LHS.Ptr != RHS.Ptr)
6671         return LHS.Ptr < RHS.Ptr;
6672       if (LHS.Kind != RHS.Kind)
6673         return LHS.Kind < RHS.Kind;
6674       return false;
6675     }
6676   };
6677 
6678   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6679   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6680   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6681   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6682 
6683   /// Categorize the pointer arguments of CB that might access memory in
6684   /// AccessedLoc and update the state and access map accordingly.
6685   void
6686   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6687                                      AAMemoryLocation::StateType &AccessedLocs,
6688                                      bool &Changed);
6689 
6690   /// Return the kind(s) of location that may be accessed by \p V.
6691   AAMemoryLocation::MemoryLocationsKind
6692   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6693 
6694   /// Return the access kind as determined by \p I.
6695   AccessKind getAccessKindFromInst(const Instruction *I) {
6696     AccessKind AK = READ_WRITE;
6697     if (I) {
6698       AK = I->mayReadFromMemory() ? READ : NONE;
6699       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6700     }
6701     return AK;
6702   }
6703 
6704   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6705   /// an access of kind \p AK to a \p MLK memory location with the access
6706   /// pointer \p Ptr.
6707   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6708                                  MemoryLocationsKind MLK, const Instruction *I,
6709                                  const Value *Ptr, bool &Changed,
6710                                  AccessKind AK = READ_WRITE) {
6711 
6712     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6713     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6714     if (!Accesses)
6715       Accesses = new (Allocator) AccessSet();
6716     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6717     State.removeAssumedBits(MLK);
6718   }
6719 
6720   /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
6721   /// arguments, and update the state and access map accordingly.
6722   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6723                           AAMemoryLocation::StateType &State, bool &Changed);
6724 
6725   /// Used to allocate access sets.
6726   BumpPtrAllocator &Allocator;
6727 
6728   /// The set of IR attributes AAMemoryLocation deals with.
6729   static const Attribute::AttrKind AttrKinds[4];
6730 };
6731 
6732 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6733     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6734     Attribute::InaccessibleMemOrArgMemOnly};
6735 
6736 void AAMemoryLocationImpl::categorizePtrValue(
6737     Attributor &A, const Instruction &I, const Value &Ptr,
6738     AAMemoryLocation::StateType &State, bool &Changed) {
6739   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6740                     << Ptr << " ["
6741                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6742 
6743   auto StripGEPCB = [](Value *V) -> Value * {
6744     auto *GEP = dyn_cast<GEPOperator>(V);
6745     while (GEP) {
6746       V = GEP->getPointerOperand();
6747       GEP = dyn_cast<GEPOperator>(V);
6748     }
6749     return V;
6750   };
6751 
6752   auto VisitValueCB = [&](Value &V, const Instruction *,
6753                           AAMemoryLocation::StateType &T,
6754                           bool Stripped) -> bool {
6755     // TODO: recognize the TBAA used for constant accesses.
6756     MemoryLocationsKind MLK = NO_LOCATIONS;
6757     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6758     if (isa<UndefValue>(V))
6759       return true;
6760     if (auto *Arg = dyn_cast<Argument>(&V)) {
6761       if (Arg->hasByValAttr())
6762         MLK = NO_LOCAL_MEM;
6763       else
6764         MLK = NO_ARGUMENT_MEM;
6765     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6766       // Reading constant memory is not treated as a read "effect" by the
6767       // function attr pass so we won't neither. Constants defined by TBAA are
6768       // similar. (We know we do not write it because it is constant.)
6769       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6770         if (GVar->isConstant())
6771           return true;
6772 
6773       if (GV->hasLocalLinkage())
6774         MLK = NO_GLOBAL_INTERNAL_MEM;
6775       else
6776         MLK = NO_GLOBAL_EXTERNAL_MEM;
6777     } else if (isa<ConstantPointerNull>(V) &&
6778                !NullPointerIsDefined(getAssociatedFunction(),
6779                                      V.getType()->getPointerAddressSpace())) {
6780       return true;
6781     } else if (isa<AllocaInst>(V)) {
6782       MLK = NO_LOCAL_MEM;
6783     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6784       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6785           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6786       if (NoAliasAA.isAssumedNoAlias())
6787         MLK = NO_MALLOCED_MEM;
6788       else
6789         MLK = NO_UNKOWN_MEM;
6790     } else {
6791       MLK = NO_UNKOWN_MEM;
6792     }
6793 
6794     assert(MLK != NO_LOCATIONS && "No location specified!");
6795     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6796                               getAccessKindFromInst(&I));
6797     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value cannot be categorized: "
6798                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6799                       << "\n");
6800     return true;
6801   };
6802 
6803   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6804           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6805           /* UseValueSimplify */ true,
6806           /* MaxValues */ 32, StripGEPCB)) {
6807     LLVM_DEBUG(
6808         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6809     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6810                               getAccessKindFromInst(&I));
6811   } else {
6812     LLVM_DEBUG(
6813         dbgs()
6814         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6815         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6816   }
6817 }
6818 
6819 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6820     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6821     bool &Changed) {
6822   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6823 
6824     // Skip non-pointer arguments.
6825     const Value *ArgOp = CB.getArgOperand(ArgNo);
6826     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6827       continue;
6828 
6829     // Skip readnone arguments.
6830     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6831     const auto &ArgOpMemLocationAA =
6832         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6833 
6834     if (ArgOpMemLocationAA.isAssumedReadNone())
6835       continue;
6836 
6837     // Categorize potentially accessed pointer arguments as if there was an
6838     // access instruction with them as pointer.
6839     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6840   }
6841 }
6842 
6843 AAMemoryLocation::MemoryLocationsKind
6844 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6845                                                   bool &Changed) {
6846   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6847                     << I << "\n");
6848 
6849   AAMemoryLocation::StateType AccessedLocs;
6850   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6851 
6852   if (auto *CB = dyn_cast<CallBase>(&I)) {
6853 
6854     // First check if we assume any memory is access is visible.
6855     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6856         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6857     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6858                       << " [" << CBMemLocationAA << "]\n");
6859 
6860     if (CBMemLocationAA.isAssumedReadNone())
6861       return NO_LOCATIONS;
6862 
6863     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6864       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6865                                 Changed, getAccessKindFromInst(&I));
6866       return AccessedLocs.getAssumed();
6867     }
6868 
6869     uint32_t CBAssumedNotAccessedLocs =
6870         CBMemLocationAA.getAssumedNotAccessedLocation();
6871 
6872     // Set the argmemonly and global bit as we handle them separately below.
6873     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6874         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6875 
6876     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6877       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6878         continue;
6879       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6880                                 getAccessKindFromInst(&I));
6881     }
6882 
6883     // Now handle global memory if it might be accessed. This is slightly tricky
6884     // as NO_GLOBAL_MEM has multiple bits set.
6885     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6886     if (HasGlobalAccesses) {
6887       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6888                             AccessKind Kind, MemoryLocationsKind MLK) {
6889         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6890                                   getAccessKindFromInst(&I));
6891         return true;
6892       };
6893       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6894               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6895         return AccessedLocs.getWorstState();
6896     }
6897 
6898     LLVM_DEBUG(
6899         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6900                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6901 
6902     // Now handle argument memory if it might be accessed.
6903     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6904     if (HasArgAccesses)
6905       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6906 
6907     LLVM_DEBUG(
6908         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6909                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6910 
6911     return AccessedLocs.getAssumed();
6912   }
6913 
6914   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6915     LLVM_DEBUG(
6916         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6917                << I << " [" << *Ptr << "]\n");
6918     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6919     return AccessedLocs.getAssumed();
6920   }
6921 
6922   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6923                     << I << "\n");
6924   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6925                             getAccessKindFromInst(&I));
6926   return AccessedLocs.getAssumed();
6927 }
6928 
6929 /// An AA to represent the memory behavior function attributes.
6930 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6931   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6932       : AAMemoryLocationImpl(IRP, A) {}
6933 
6934   /// See AbstractAttribute::updateImpl(Attributor &A).
6935   virtual ChangeStatus updateImpl(Attributor &A) override {
6936 
6937     const auto &MemBehaviorAA =
6938         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6939     if (MemBehaviorAA.isAssumedReadNone()) {
6940       if (MemBehaviorAA.isKnownReadNone())
6941         return indicateOptimisticFixpoint();
6942       assert(isAssumedReadNone() &&
6943              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6944       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6945       return ChangeStatus::UNCHANGED;
6946     }
6947 
6948     // The current assumed state used to determine a change.
6949     auto AssumedState = getAssumed();
6950     bool Changed = false;
6951 
6952     auto CheckRWInst = [&](Instruction &I) {
6953       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6954       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6955                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6956       removeAssumedBits(inverseLocation(MLK, false, false));
6957       // Stop once only the valid bit set in the *not assumed location*, thus
6958       // once we don't actually exclude any memory locations in the state.
6959       return getAssumedNotAccessedLocation() != VALID_STATE;
6960     };
6961 
6962     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6963       return indicatePessimisticFixpoint();
6964 
6965     Changed |= AssumedState != getAssumed();
6966     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6967   }
6968 
6969   /// See AbstractAttribute::trackStatistics()
6970   void trackStatistics() const override {
6971     if (isAssumedReadNone())
6972       STATS_DECLTRACK_FN_ATTR(readnone)
6973     else if (isAssumedArgMemOnly())
6974       STATS_DECLTRACK_FN_ATTR(argmemonly)
6975     else if (isAssumedInaccessibleMemOnly())
6976       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6977     else if (isAssumedInaccessibleOrArgMemOnly())
6978       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6979   }
6980 };
6981 
6982 /// AAMemoryLocation attribute for call sites.
6983 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6984   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6985       : AAMemoryLocationImpl(IRP, A) {}
6986 
6987   /// See AbstractAttribute::initialize(...).
6988   void initialize(Attributor &A) override {
6989     AAMemoryLocationImpl::initialize(A);
6990     Function *F = getAssociatedFunction();
6991     if (!F || F->isDeclaration())
6992       indicatePessimisticFixpoint();
6993   }
6994 
6995   /// See AbstractAttribute::updateImpl(...).
6996   ChangeStatus updateImpl(Attributor &A) override {
6997     // TODO: Once we have call site specific value information we can provide
6998     //       call site specific liveness liveness information and then it makes
6999     //       sense to specialize attributes for call sites arguments instead of
7000     //       redirecting requests to the callee argument.
7001     Function *F = getAssociatedFunction();
7002     const IRPosition &FnPos = IRPosition::function(*F);
7003     auto &FnAA =
7004         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
7005     bool Changed = false;
7006     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
7007                           AccessKind Kind, MemoryLocationsKind MLK) {
7008       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
7009                                 getAccessKindFromInst(I));
7010       return true;
7011     };
7012     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
7013       return indicatePessimisticFixpoint();
7014     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
7015   }
7016 
7017   /// See AbstractAttribute::trackStatistics()
7018   void trackStatistics() const override {
7019     if (isAssumedReadNone())
7020       STATS_DECLTRACK_CS_ATTR(readnone)
7021   }
7022 };
7023 
7024 /// ------------------ Value Constant Range Attribute -------------------------
7025 
7026 struct AAValueConstantRangeImpl : AAValueConstantRange {
7027   using StateType = IntegerRangeState;
7028   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
7029       : AAValueConstantRange(IRP, A) {}
7030 
7031   /// See AbstractAttribute::getAsStr().
7032   const std::string getAsStr() const override {
7033     std::string Str;
7034     llvm::raw_string_ostream OS(Str);
7035     OS << "range(" << getBitWidth() << ")<";
7036     getKnown().print(OS);
7037     OS << " / ";
7038     getAssumed().print(OS);
7039     OS << ">";
7040     return OS.str();
7041   }
7042 
7043   /// Helper function to get a SCEV expr for the associated value at program
7044   /// point \p I.
7045   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
7046     if (!getAnchorScope())
7047       return nullptr;
7048 
7049     ScalarEvolution *SE =
7050         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7051             *getAnchorScope());
7052 
7053     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
7054         *getAnchorScope());
7055 
7056     if (!SE || !LI)
7057       return nullptr;
7058 
7059     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7060     if (!I)
7061       return S;
7062 
7063     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7064   }
7065 
7066   /// Helper function to get a range from SCEV for the associated value at
7067   /// program point \p I.
7068   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7069                                          const Instruction *I = nullptr) const {
7070     if (!getAnchorScope())
7071       return getWorstState(getBitWidth());
7072 
7073     ScalarEvolution *SE =
7074         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7075             *getAnchorScope());
7076 
7077     const SCEV *S = getSCEV(A, I);
7078     if (!SE || !S)
7079       return getWorstState(getBitWidth());
7080 
7081     return SE->getUnsignedRange(S);
7082   }
7083 
7084   /// Helper function to get a range from LVI for the associated value at
7085   /// program point \p I.
7086   ConstantRange
7087   getConstantRangeFromLVI(Attributor &A,
7088                           const Instruction *CtxI = nullptr) const {
7089     if (!getAnchorScope())
7090       return getWorstState(getBitWidth());
7091 
7092     LazyValueInfo *LVI =
7093         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7094             *getAnchorScope());
7095 
7096     if (!LVI || !CtxI)
7097       return getWorstState(getBitWidth());
7098     return LVI->getConstantRange(&getAssociatedValue(),
7099                                  const_cast<Instruction *>(CtxI));
7100   }
7101 
7102   /// See AAValueConstantRange::getKnownConstantRange(..).
7103   ConstantRange
7104   getKnownConstantRange(Attributor &A,
7105                         const Instruction *CtxI = nullptr) const override {
7106     if (!CtxI || CtxI == getCtxI())
7107       return getKnown();
7108 
7109     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7110     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7111     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7112   }
7113 
7114   /// See AAValueConstantRange::getAssumedConstantRange(..).
7115   ConstantRange
7116   getAssumedConstantRange(Attributor &A,
7117                           const Instruction *CtxI = nullptr) const override {
7118     // TODO: Make SCEV use Attributor assumption.
7119     //       We may be able to bound a variable range via assumptions in
7120     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
7121     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7122 
7123     if (!CtxI || CtxI == getCtxI())
7124       return getAssumed();
7125 
7126     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7127     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7128     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7129   }
7130 
7131   /// See AbstractAttribute::initialize(..).
7132   void initialize(Attributor &A) override {
7133     // Intersect a range given by SCEV.
7134     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7135 
7136     // Intersect a range given by LVI.
7137     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7138   }
7139 
7140   /// Helper function to create MDNode for range metadata.
7141   static MDNode *
7142   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7143                             const ConstantRange &AssumedConstantRange) {
7144     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7145                                   Ty, AssumedConstantRange.getLower())),
7146                               ConstantAsMetadata::get(ConstantInt::get(
7147                                   Ty, AssumedConstantRange.getUpper()))};
7148     return MDNode::get(Ctx, LowAndHigh);
7149   }
7150 
7151   /// Return true if \p Assumed is included in \p KnownRanges.
7152   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7153 
7154     if (Assumed.isFullSet())
7155       return false;
7156 
7157     if (!KnownRanges)
7158       return true;
7159 
7160     // If multiple ranges are annotated in IR, we give up to annotate assumed
7161     // range for now.
7162 
7163     // TODO:  If there exists a known range which containts assumed range, we
7164     // can say assumed range is better.
7165     if (KnownRanges->getNumOperands() > 2)
7166       return false;
7167 
7168     ConstantInt *Lower =
7169         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7170     ConstantInt *Upper =
7171         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7172 
7173     ConstantRange Known(Lower->getValue(), Upper->getValue());
7174     return Known.contains(Assumed) && Known != Assumed;
7175   }
7176 
7177   /// Helper function to set range metadata.
7178   static bool
7179   setRangeMetadataIfisBetterRange(Instruction *I,
7180                                   const ConstantRange &AssumedConstantRange) {
7181     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7182     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7183       if (!AssumedConstantRange.isEmptySet()) {
7184         I->setMetadata(LLVMContext::MD_range,
7185                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7186                                                  AssumedConstantRange));
7187         return true;
7188       }
7189     }
7190     return false;
7191   }
7192 
7193   /// See AbstractAttribute::manifest()
7194   ChangeStatus manifest(Attributor &A) override {
7195     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7196     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7197     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7198 
7199     auto &V = getAssociatedValue();
7200     if (!AssumedConstantRange.isEmptySet() &&
7201         !AssumedConstantRange.isSingleElement()) {
7202       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7203         assert(I == getCtxI() && "Should not annotate an instruction which is "
7204                                  "not the context instruction");
7205         if (isa<CallInst>(I) || isa<LoadInst>(I))
7206           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7207             Changed = ChangeStatus::CHANGED;
7208       }
7209     }
7210 
7211     return Changed;
7212   }
7213 };
7214 
7215 struct AAValueConstantRangeArgument final
7216     : AAArgumentFromCallSiteArguments<
7217           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7218           true /* BridgeCallBaseContext */> {
7219   using Base = AAArgumentFromCallSiteArguments<
7220       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7221       true /* BridgeCallBaseContext */>;
7222   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7223       : Base(IRP, A) {}
7224 
7225   /// See AbstractAttribute::initialize(..).
7226   void initialize(Attributor &A) override {
7227     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7228       indicatePessimisticFixpoint();
7229     } else {
7230       Base::initialize(A);
7231     }
7232   }
7233 
7234   /// See AbstractAttribute::trackStatistics()
7235   void trackStatistics() const override {
7236     STATS_DECLTRACK_ARG_ATTR(value_range)
7237   }
7238 };
7239 
7240 struct AAValueConstantRangeReturned
7241     : AAReturnedFromReturnedValues<AAValueConstantRange,
7242                                    AAValueConstantRangeImpl,
7243                                    AAValueConstantRangeImpl::StateType,
7244                                    /* PropogateCallBaseContext */ true> {
7245   using Base =
7246       AAReturnedFromReturnedValues<AAValueConstantRange,
7247                                    AAValueConstantRangeImpl,
7248                                    AAValueConstantRangeImpl::StateType,
7249                                    /* PropogateCallBaseContext */ true>;
7250   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7251       : Base(IRP, A) {}
7252 
7253   /// See AbstractAttribute::initialize(...).
7254   void initialize(Attributor &A) override {}
7255 
7256   /// See AbstractAttribute::trackStatistics()
7257   void trackStatistics() const override {
7258     STATS_DECLTRACK_FNRET_ATTR(value_range)
7259   }
7260 };
7261 
7262 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7263   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7264       : AAValueConstantRangeImpl(IRP, A) {}
7265 
7266   /// See AbstractAttribute::initialize(...).
7267   void initialize(Attributor &A) override {
7268     AAValueConstantRangeImpl::initialize(A);
7269     Value &V = getAssociatedValue();
7270 
7271     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7272       unionAssumed(ConstantRange(C->getValue()));
7273       indicateOptimisticFixpoint();
7274       return;
7275     }
7276 
7277     if (isa<UndefValue>(&V)) {
7278       // Collapse the undef state to 0.
7279       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7280       indicateOptimisticFixpoint();
7281       return;
7282     }
7283 
7284     if (isa<CallBase>(&V))
7285       return;
7286 
7287     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7288       return;
7289     // If it is a load instruction with range metadata, use it.
7290     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7291       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7292         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7293         return;
7294       }
7295 
7296     // We can work with PHI and select instruction as we traverse their operands
7297     // during update.
7298     if (isa<SelectInst>(V) || isa<PHINode>(V))
7299       return;
7300 
7301     // Otherwise we give up.
7302     indicatePessimisticFixpoint();
7303 
7304     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7305                       << getAssociatedValue() << "\n");
7306   }
7307 
7308   bool calculateBinaryOperator(
7309       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7310       const Instruction *CtxI,
7311       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7312     Value *LHS = BinOp->getOperand(0);
7313     Value *RHS = BinOp->getOperand(1);
7314     // TODO: Allow non integers as well.
7315     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7316       return false;
7317 
7318     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7319         *this, IRPosition::value(*LHS, getCallBaseContext()),
7320         DepClassTy::REQUIRED);
7321     QuerriedAAs.push_back(&LHSAA);
7322     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7323 
7324     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7325         *this, IRPosition::value(*RHS, getCallBaseContext()),
7326         DepClassTy::REQUIRED);
7327     QuerriedAAs.push_back(&RHSAA);
7328     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7329 
7330     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7331 
7332     T.unionAssumed(AssumedRange);
7333 
7334     // TODO: Track a known state too.
7335 
7336     return T.isValidState();
7337   }
7338 
7339   bool calculateCastInst(
7340       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7341       const Instruction *CtxI,
7342       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7343     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7344     // TODO: Allow non integers as well.
7345     Value &OpV = *CastI->getOperand(0);
7346     if (!OpV.getType()->isIntegerTy())
7347       return false;
7348 
7349     auto &OpAA = A.getAAFor<AAValueConstantRange>(
7350         *this, IRPosition::value(OpV, getCallBaseContext()),
7351         DepClassTy::REQUIRED);
7352     QuerriedAAs.push_back(&OpAA);
7353     T.unionAssumed(
7354         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7355     return T.isValidState();
7356   }
7357 
7358   bool
7359   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7360                    const Instruction *CtxI,
7361                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7362     Value *LHS = CmpI->getOperand(0);
7363     Value *RHS = CmpI->getOperand(1);
7364     // TODO: Allow non integers as well.
7365     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7366       return false;
7367 
7368     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7369         *this, IRPosition::value(*LHS, getCallBaseContext()),
7370         DepClassTy::REQUIRED);
7371     QuerriedAAs.push_back(&LHSAA);
7372     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7373         *this, IRPosition::value(*RHS, getCallBaseContext()),
7374         DepClassTy::REQUIRED);
7375     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7376     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7377 
7378     // If one of them is empty set, we can't decide.
7379     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7380       return true;
7381 
7382     bool MustTrue = false, MustFalse = false;
7383 
7384     auto AllowedRegion =
7385         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7386 
7387     auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
7388         CmpI->getPredicate(), RHSAARange);
7389 
7390     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7391       MustFalse = true;
7392 
7393     if (SatisfyingRegion.contains(LHSAARange))
7394       MustTrue = true;
7395 
7396     assert((!MustTrue || !MustFalse) &&
7397            "Either MustTrue or MustFalse should be false!");
7398 
7399     if (MustTrue)
7400       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7401     else if (MustFalse)
7402       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7403     else
7404       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7405 
7406     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7407                       << " " << RHSAA << "\n");
7408 
7409     // TODO: Track a known state too.
7410     return T.isValidState();
7411   }
7412 
7413   /// See AbstractAttribute::updateImpl(...).
7414   ChangeStatus updateImpl(Attributor &A) override {
7415     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7416                             IntegerRangeState &T, bool Stripped) -> bool {
7417       Instruction *I = dyn_cast<Instruction>(&V);
7418       if (!I || isa<CallBase>(I)) {
7419 
7420         // If the value is not instruction, we query AA to Attributor.
7421         const auto &AA = A.getAAFor<AAValueConstantRange>(
7422             *this, IRPosition::value(V), DepClassTy::REQUIRED);
7423 
7424         // Clamp operator is not used to utilize a program point CtxI.
7425         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7426 
7427         return T.isValidState();
7428       }
7429 
7430       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7431       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7432         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7433           return false;
7434       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7435         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7436           return false;
7437       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7438         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7439           return false;
7440       } else {
7441         // Give up with other instructions.
7442         // TODO: Add other instructions
7443 
7444         T.indicatePessimisticFixpoint();
7445         return false;
7446       }
7447 
7448       // Catch circular reasoning in a pessimistic way for now.
7449       // TODO: Check how the range evolves and if we stripped anything, see also
7450       //       AADereferenceable or AAAlign for similar situations.
7451       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7452         if (QueriedAA != this)
7453           continue;
7454         // If we are in a stady state we do not need to worry.
7455         if (T.getAssumed() == getState().getAssumed())
7456           continue;
7457         T.indicatePessimisticFixpoint();
7458       }
7459 
7460       return T.isValidState();
7461     };
7462 
7463     IntegerRangeState T(getBitWidth());
7464 
7465     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7466             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7467             /* UseValueSimplify */ false))
7468       return indicatePessimisticFixpoint();
7469 
7470     return clampStateAndIndicateChange(getState(), T);
7471   }
7472 
7473   /// See AbstractAttribute::trackStatistics()
7474   void trackStatistics() const override {
7475     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7476   }
7477 };
7478 
7479 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7480   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7481       : AAValueConstantRangeImpl(IRP, A) {}
7482 
7483   /// See AbstractAttribute::initialize(...).
7484   ChangeStatus updateImpl(Attributor &A) override {
7485     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7486                      "not be called");
7487   }
7488 
7489   /// See AbstractAttribute::trackStatistics()
7490   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7491 };
7492 
7493 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7494   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7495       : AAValueConstantRangeFunction(IRP, A) {}
7496 
7497   /// See AbstractAttribute::trackStatistics()
7498   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7499 };
7500 
7501 struct AAValueConstantRangeCallSiteReturned
7502     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7503                                      AAValueConstantRangeImpl,
7504                                      AAValueConstantRangeImpl::StateType,
7505                                      /* IntroduceCallBaseContext */ true> {
7506   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7507       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7508                                        AAValueConstantRangeImpl,
7509                                        AAValueConstantRangeImpl::StateType,
7510                                        /* IntroduceCallBaseContext */ true>(IRP,
7511                                                                             A) {
7512   }
7513 
7514   /// See AbstractAttribute::initialize(...).
7515   void initialize(Attributor &A) override {
7516     // If it is a load instruction with range metadata, use the metadata.
7517     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7518       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7519         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7520 
7521     AAValueConstantRangeImpl::initialize(A);
7522   }
7523 
7524   /// See AbstractAttribute::trackStatistics()
7525   void trackStatistics() const override {
7526     STATS_DECLTRACK_CSRET_ATTR(value_range)
7527   }
7528 };
7529 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7530   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7531       : AAValueConstantRangeFloating(IRP, A) {}
7532 
7533   /// See AbstractAttribute::manifest()
7534   ChangeStatus manifest(Attributor &A) override {
7535     return ChangeStatus::UNCHANGED;
7536   }
7537 
7538   /// See AbstractAttribute::trackStatistics()
7539   void trackStatistics() const override {
7540     STATS_DECLTRACK_CSARG_ATTR(value_range)
7541   }
7542 };
7543 
7544 /// ------------------ Potential Values Attribute -------------------------
7545 
7546 struct AAPotentialValuesImpl : AAPotentialValues {
7547   using StateType = PotentialConstantIntValuesState;
7548 
7549   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7550       : AAPotentialValues(IRP, A) {}
7551 
7552   /// See AbstractAttribute::getAsStr().
7553   const std::string getAsStr() const override {
7554     std::string Str;
7555     llvm::raw_string_ostream OS(Str);
7556     OS << getState();
7557     return OS.str();
7558   }
7559 
7560   /// See AbstractAttribute::updateImpl(...).
7561   ChangeStatus updateImpl(Attributor &A) override {
7562     return indicatePessimisticFixpoint();
7563   }
7564 };
7565 
7566 struct AAPotentialValuesArgument final
7567     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7568                                       PotentialConstantIntValuesState> {
7569   using Base =
7570       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7571                                       PotentialConstantIntValuesState>;
7572   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7573       : Base(IRP, A) {}
7574 
7575   /// See AbstractAttribute::initialize(..).
7576   void initialize(Attributor &A) override {
7577     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7578       indicatePessimisticFixpoint();
7579     } else {
7580       Base::initialize(A);
7581     }
7582   }
7583 
7584   /// See AbstractAttribute::trackStatistics()
7585   void trackStatistics() const override {
7586     STATS_DECLTRACK_ARG_ATTR(potential_values)
7587   }
7588 };
7589 
7590 struct AAPotentialValuesReturned
7591     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7592   using Base =
7593       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7594   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7595       : Base(IRP, A) {}
7596 
7597   /// See AbstractAttribute::trackStatistics()
7598   void trackStatistics() const override {
7599     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7600   }
7601 };
7602 
7603 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7604   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7605       : AAPotentialValuesImpl(IRP, A) {}
7606 
7607   /// See AbstractAttribute::initialize(..).
7608   void initialize(Attributor &A) override {
7609     Value &V = getAssociatedValue();
7610 
7611     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7612       unionAssumed(C->getValue());
7613       indicateOptimisticFixpoint();
7614       return;
7615     }
7616 
7617     if (isa<UndefValue>(&V)) {
7618       unionAssumedWithUndef();
7619       indicateOptimisticFixpoint();
7620       return;
7621     }
7622 
7623     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7624       return;
7625 
7626     if (isa<SelectInst>(V) || isa<PHINode>(V))
7627       return;
7628 
7629     indicatePessimisticFixpoint();
7630 
7631     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7632                       << getAssociatedValue() << "\n");
7633   }
7634 
7635   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7636                                 const APInt &RHS) {
7637     ICmpInst::Predicate Pred = ICI->getPredicate();
7638     switch (Pred) {
7639     case ICmpInst::ICMP_UGT:
7640       return LHS.ugt(RHS);
7641     case ICmpInst::ICMP_SGT:
7642       return LHS.sgt(RHS);
7643     case ICmpInst::ICMP_EQ:
7644       return LHS.eq(RHS);
7645     case ICmpInst::ICMP_UGE:
7646       return LHS.uge(RHS);
7647     case ICmpInst::ICMP_SGE:
7648       return LHS.sge(RHS);
7649     case ICmpInst::ICMP_ULT:
7650       return LHS.ult(RHS);
7651     case ICmpInst::ICMP_SLT:
7652       return LHS.slt(RHS);
7653     case ICmpInst::ICMP_NE:
7654       return LHS.ne(RHS);
7655     case ICmpInst::ICMP_ULE:
7656       return LHS.ule(RHS);
7657     case ICmpInst::ICMP_SLE:
7658       return LHS.sle(RHS);
7659     default:
7660       llvm_unreachable("Invalid ICmp predicate!");
7661     }
7662   }
7663 
7664   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7665                                  uint32_t ResultBitWidth) {
7666     Instruction::CastOps CastOp = CI->getOpcode();
7667     switch (CastOp) {
7668     default:
7669       llvm_unreachable("unsupported or not integer cast");
7670     case Instruction::Trunc:
7671       return Src.trunc(ResultBitWidth);
7672     case Instruction::SExt:
7673       return Src.sext(ResultBitWidth);
7674     case Instruction::ZExt:
7675       return Src.zext(ResultBitWidth);
7676     case Instruction::BitCast:
7677       return Src;
7678     }
7679   }
7680 
7681   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7682                                        const APInt &LHS, const APInt &RHS,
7683                                        bool &SkipOperation, bool &Unsupported) {
7684     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7685     // Unsupported is set to true when the binary operator is not supported.
7686     // SkipOperation is set to true when UB occur with the given operand pair
7687     // (LHS, RHS).
7688     // TODO: we should look at nsw and nuw keywords to handle operations
7689     //       that create poison or undef value.
7690     switch (BinOpcode) {
7691     default:
7692       Unsupported = true;
7693       return LHS;
7694     case Instruction::Add:
7695       return LHS + RHS;
7696     case Instruction::Sub:
7697       return LHS - RHS;
7698     case Instruction::Mul:
7699       return LHS * RHS;
7700     case Instruction::UDiv:
7701       if (RHS.isNullValue()) {
7702         SkipOperation = true;
7703         return LHS;
7704       }
7705       return LHS.udiv(RHS);
7706     case Instruction::SDiv:
7707       if (RHS.isNullValue()) {
7708         SkipOperation = true;
7709         return LHS;
7710       }
7711       return LHS.sdiv(RHS);
7712     case Instruction::URem:
7713       if (RHS.isNullValue()) {
7714         SkipOperation = true;
7715         return LHS;
7716       }
7717       return LHS.urem(RHS);
7718     case Instruction::SRem:
7719       if (RHS.isNullValue()) {
7720         SkipOperation = true;
7721         return LHS;
7722       }
7723       return LHS.srem(RHS);
7724     case Instruction::Shl:
7725       return LHS.shl(RHS);
7726     case Instruction::LShr:
7727       return LHS.lshr(RHS);
7728     case Instruction::AShr:
7729       return LHS.ashr(RHS);
7730     case Instruction::And:
7731       return LHS & RHS;
7732     case Instruction::Or:
7733       return LHS | RHS;
7734     case Instruction::Xor:
7735       return LHS ^ RHS;
7736     }
7737   }
7738 
7739   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7740                                            const APInt &LHS, const APInt &RHS) {
7741     bool SkipOperation = false;
7742     bool Unsupported = false;
7743     APInt Result =
7744         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7745     if (Unsupported)
7746       return false;
7747     // If SkipOperation is true, we can ignore this operand pair (L, R).
7748     if (!SkipOperation)
7749       unionAssumed(Result);
7750     return isValidState();
7751   }
7752 
7753   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7754     auto AssumedBefore = getAssumed();
7755     Value *LHS = ICI->getOperand(0);
7756     Value *RHS = ICI->getOperand(1);
7757     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7758       return indicatePessimisticFixpoint();
7759 
7760     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7761                                                 DepClassTy::REQUIRED);
7762     if (!LHSAA.isValidState())
7763       return indicatePessimisticFixpoint();
7764 
7765     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7766                                                 DepClassTy::REQUIRED);
7767     if (!RHSAA.isValidState())
7768       return indicatePessimisticFixpoint();
7769 
7770     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7771     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7772 
7773     // TODO: make use of undef flag to limit potential values aggressively.
7774     bool MaybeTrue = false, MaybeFalse = false;
7775     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7776     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7777       // The result of any comparison between undefs can be soundly replaced
7778       // with undef.
7779       unionAssumedWithUndef();
7780     } else if (LHSAA.undefIsContained()) {
7781       bool MaybeTrue = false, MaybeFalse = false;
7782       for (const APInt &R : RHSAAPVS) {
7783         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7784         MaybeTrue |= CmpResult;
7785         MaybeFalse |= !CmpResult;
7786         if (MaybeTrue & MaybeFalse)
7787           return indicatePessimisticFixpoint();
7788       }
7789     } else if (RHSAA.undefIsContained()) {
7790       for (const APInt &L : LHSAAPVS) {
7791         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7792         MaybeTrue |= CmpResult;
7793         MaybeFalse |= !CmpResult;
7794         if (MaybeTrue & MaybeFalse)
7795           return indicatePessimisticFixpoint();
7796       }
7797     } else {
7798       for (const APInt &L : LHSAAPVS) {
7799         for (const APInt &R : RHSAAPVS) {
7800           bool CmpResult = calculateICmpInst(ICI, L, R);
7801           MaybeTrue |= CmpResult;
7802           MaybeFalse |= !CmpResult;
7803           if (MaybeTrue & MaybeFalse)
7804             return indicatePessimisticFixpoint();
7805         }
7806       }
7807     }
7808     if (MaybeTrue)
7809       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7810     if (MaybeFalse)
7811       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7812     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7813                                          : ChangeStatus::CHANGED;
7814   }
7815 
7816   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7817     auto AssumedBefore = getAssumed();
7818     Value *LHS = SI->getTrueValue();
7819     Value *RHS = SI->getFalseValue();
7820     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7821       return indicatePessimisticFixpoint();
7822 
7823     // TODO: Use assumed simplified condition value
7824     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7825                                                 DepClassTy::REQUIRED);
7826     if (!LHSAA.isValidState())
7827       return indicatePessimisticFixpoint();
7828 
7829     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7830                                                 DepClassTy::REQUIRED);
7831     if (!RHSAA.isValidState())
7832       return indicatePessimisticFixpoint();
7833 
7834     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7835       // select i1 *, undef , undef => undef
7836       unionAssumedWithUndef();
7837     else {
7838       unionAssumed(LHSAA);
7839       unionAssumed(RHSAA);
7840     }
7841     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7842                                          : ChangeStatus::CHANGED;
7843   }
7844 
7845   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7846     auto AssumedBefore = getAssumed();
7847     if (!CI->isIntegerCast())
7848       return indicatePessimisticFixpoint();
7849     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7850     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7851     Value *Src = CI->getOperand(0);
7852     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7853                                                 DepClassTy::REQUIRED);
7854     if (!SrcAA.isValidState())
7855       return indicatePessimisticFixpoint();
7856     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7857     if (SrcAA.undefIsContained())
7858       unionAssumedWithUndef();
7859     else {
7860       for (const APInt &S : SrcAAPVS) {
7861         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7862         unionAssumed(T);
7863       }
7864     }
7865     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7866                                          : ChangeStatus::CHANGED;
7867   }
7868 
7869   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7870     auto AssumedBefore = getAssumed();
7871     Value *LHS = BinOp->getOperand(0);
7872     Value *RHS = BinOp->getOperand(1);
7873     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7874       return indicatePessimisticFixpoint();
7875 
7876     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7877                                                 DepClassTy::REQUIRED);
7878     if (!LHSAA.isValidState())
7879       return indicatePessimisticFixpoint();
7880 
7881     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7882                                                 DepClassTy::REQUIRED);
7883     if (!RHSAA.isValidState())
7884       return indicatePessimisticFixpoint();
7885 
7886     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7887     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7888     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7889 
7890     // TODO: make use of undef flag to limit potential values aggressively.
7891     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7892       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7893         return indicatePessimisticFixpoint();
7894     } else if (LHSAA.undefIsContained()) {
7895       for (const APInt &R : RHSAAPVS) {
7896         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7897           return indicatePessimisticFixpoint();
7898       }
7899     } else if (RHSAA.undefIsContained()) {
7900       for (const APInt &L : LHSAAPVS) {
7901         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7902           return indicatePessimisticFixpoint();
7903       }
7904     } else {
7905       for (const APInt &L : LHSAAPVS) {
7906         for (const APInt &R : RHSAAPVS) {
7907           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7908             return indicatePessimisticFixpoint();
7909         }
7910       }
7911     }
7912     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7913                                          : ChangeStatus::CHANGED;
7914   }
7915 
7916   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7917     auto AssumedBefore = getAssumed();
7918     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7919       Value *IncomingValue = PHI->getIncomingValue(u);
7920       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7921           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
7922       if (!PotentialValuesAA.isValidState())
7923         return indicatePessimisticFixpoint();
7924       if (PotentialValuesAA.undefIsContained())
7925         unionAssumedWithUndef();
7926       else
7927         unionAssumed(PotentialValuesAA.getAssumed());
7928     }
7929     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7930                                          : ChangeStatus::CHANGED;
7931   }
7932 
7933   /// See AbstractAttribute::updateImpl(...).
7934   ChangeStatus updateImpl(Attributor &A) override {
7935     Value &V = getAssociatedValue();
7936     Instruction *I = dyn_cast<Instruction>(&V);
7937 
7938     if (auto *ICI = dyn_cast<ICmpInst>(I))
7939       return updateWithICmpInst(A, ICI);
7940 
7941     if (auto *SI = dyn_cast<SelectInst>(I))
7942       return updateWithSelectInst(A, SI);
7943 
7944     if (auto *CI = dyn_cast<CastInst>(I))
7945       return updateWithCastInst(A, CI);
7946 
7947     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7948       return updateWithBinaryOperator(A, BinOp);
7949 
7950     if (auto *PHI = dyn_cast<PHINode>(I))
7951       return updateWithPHINode(A, PHI);
7952 
7953     return indicatePessimisticFixpoint();
7954   }
7955 
7956   /// See AbstractAttribute::trackStatistics()
7957   void trackStatistics() const override {
7958     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7959   }
7960 };
7961 
7962 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7963   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7964       : AAPotentialValuesImpl(IRP, A) {}
7965 
7966   /// See AbstractAttribute::initialize(...).
7967   ChangeStatus updateImpl(Attributor &A) override {
7968     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7969                      "not be called");
7970   }
7971 
7972   /// See AbstractAttribute::trackStatistics()
7973   void trackStatistics() const override {
7974     STATS_DECLTRACK_FN_ATTR(potential_values)
7975   }
7976 };
7977 
7978 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7979   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7980       : AAPotentialValuesFunction(IRP, A) {}
7981 
7982   /// See AbstractAttribute::trackStatistics()
7983   void trackStatistics() const override {
7984     STATS_DECLTRACK_CS_ATTR(potential_values)
7985   }
7986 };
7987 
7988 struct AAPotentialValuesCallSiteReturned
7989     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7990   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7991       : AACallSiteReturnedFromReturned<AAPotentialValues,
7992                                        AAPotentialValuesImpl>(IRP, A) {}
7993 
7994   /// See AbstractAttribute::trackStatistics()
7995   void trackStatistics() const override {
7996     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7997   }
7998 };
7999 
8000 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
8001   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
8002       : AAPotentialValuesFloating(IRP, A) {}
8003 
8004   /// See AbstractAttribute::initialize(..).
8005   void initialize(Attributor &A) override {
8006     Value &V = getAssociatedValue();
8007 
8008     if (auto *C = dyn_cast<ConstantInt>(&V)) {
8009       unionAssumed(C->getValue());
8010       indicateOptimisticFixpoint();
8011       return;
8012     }
8013 
8014     if (isa<UndefValue>(&V)) {
8015       unionAssumedWithUndef();
8016       indicateOptimisticFixpoint();
8017       return;
8018     }
8019   }
8020 
8021   /// See AbstractAttribute::updateImpl(...).
8022   ChangeStatus updateImpl(Attributor &A) override {
8023     Value &V = getAssociatedValue();
8024     auto AssumedBefore = getAssumed();
8025     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
8026                                              DepClassTy::REQUIRED);
8027     const auto &S = AA.getAssumed();
8028     unionAssumed(S);
8029     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
8030                                          : ChangeStatus::CHANGED;
8031   }
8032 
8033   /// See AbstractAttribute::trackStatistics()
8034   void trackStatistics() const override {
8035     STATS_DECLTRACK_CSARG_ATTR(potential_values)
8036   }
8037 };
8038 
8039 /// ------------------------ NoUndef Attribute ---------------------------------
8040 struct AANoUndefImpl : AANoUndef {
8041   AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}
8042 
8043   /// See AbstractAttribute::initialize(...).
8044   void initialize(Attributor &A) override {
8045     if (getIRPosition().hasAttr({Attribute::NoUndef})) {
8046       indicateOptimisticFixpoint();
8047       return;
8048     }
8049     Value &V = getAssociatedValue();
8050     if (isa<UndefValue>(V))
8051       indicatePessimisticFixpoint();
8052     else if (isa<FreezeInst>(V))
8053       indicateOptimisticFixpoint();
8054     else if (getPositionKind() != IRPosition::IRP_RETURNED &&
8055              isGuaranteedNotToBeUndefOrPoison(&V))
8056       indicateOptimisticFixpoint();
8057     else
8058       AANoUndef::initialize(A);
8059   }
8060 
8061   /// See followUsesInMBEC
8062   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
8063                        AANoUndef::StateType &State) {
8064     const Value *UseV = U->get();
8065     const DominatorTree *DT = nullptr;
8066     AssumptionCache *AC = nullptr;
8067     InformationCache &InfoCache = A.getInfoCache();
8068     if (Function *F = getAnchorScope()) {
8069       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
8070       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
8071     }
8072     State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
8073     bool TrackUse = false;
8074     // Track use for instructions which must produce undef or poison bits when
8075     // at least one operand contains such bits.
8076     if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
8077       TrackUse = true;
8078     return TrackUse;
8079   }
8080 
8081   /// See AbstractAttribute::getAsStr().
8082   const std::string getAsStr() const override {
8083     return getAssumed() ? "noundef" : "may-undef-or-poison";
8084   }
8085 
8086   ChangeStatus manifest(Attributor &A) override {
8087     // We don't manifest noundef attribute for dead positions because the
8088     // associated values with dead positions would be replaced with undef
8089     // values.
8090     if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
8091       return ChangeStatus::UNCHANGED;
8092     // A position whose simplified value does not have any value is
8093     // considered to be dead. We don't manifest noundef in such positions for
8094     // the same reason above.
8095     auto &ValueSimplifyAA =
8096         A.getAAFor<AAValueSimplify>(*this, getIRPosition(), DepClassTy::NONE);
8097     if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
8098       return ChangeStatus::UNCHANGED;
8099     return AANoUndef::manifest(A);
8100   }
8101 };
8102 
8103 struct AANoUndefFloating : public AANoUndefImpl {
8104   AANoUndefFloating(const IRPosition &IRP, Attributor &A)
8105       : AANoUndefImpl(IRP, A) {}
8106 
8107   /// See AbstractAttribute::initialize(...).
8108   void initialize(Attributor &A) override {
8109     AANoUndefImpl::initialize(A);
8110     if (!getState().isAtFixpoint())
8111       if (Instruction *CtxI = getCtxI())
8112         followUsesInMBEC(*this, A, getState(), *CtxI);
8113   }
8114 
8115   /// See AbstractAttribute::updateImpl(...).
8116   ChangeStatus updateImpl(Attributor &A) override {
8117     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
8118                             AANoUndef::StateType &T, bool Stripped) -> bool {
8119       const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
8120                                              DepClassTy::REQUIRED);
8121       if (!Stripped && this == &AA) {
8122         T.indicatePessimisticFixpoint();
8123       } else {
8124         const AANoUndef::StateType &S =
8125             static_cast<const AANoUndef::StateType &>(AA.getState());
8126         T ^= S;
8127       }
8128       return T.isValidState();
8129     };
8130 
8131     StateType T;
8132     if (!genericValueTraversal<AANoUndef, StateType>(
8133             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
8134       return indicatePessimisticFixpoint();
8135 
8136     return clampStateAndIndicateChange(getState(), T);
8137   }
8138 
8139   /// See AbstractAttribute::trackStatistics()
8140   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8141 };
8142 
8143 struct AANoUndefReturned final
8144     : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
8145   AANoUndefReturned(const IRPosition &IRP, Attributor &A)
8146       : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}
8147 
8148   /// See AbstractAttribute::trackStatistics()
8149   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
8150 };
8151 
8152 struct AANoUndefArgument final
8153     : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
8154   AANoUndefArgument(const IRPosition &IRP, Attributor &A)
8155       : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}
8156 
8157   /// See AbstractAttribute::trackStatistics()
8158   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
8159 };
8160 
8161 struct AANoUndefCallSiteArgument final : AANoUndefFloating {
8162   AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
8163       : AANoUndefFloating(IRP, A) {}
8164 
8165   /// See AbstractAttribute::trackStatistics()
8166   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
8167 };
8168 
8169 struct AANoUndefCallSiteReturned final
8170     : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
8171   AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
8172       : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}
8173 
8174   /// See AbstractAttribute::trackStatistics()
8175   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
8176 };
8177 } // namespace
8178 
8179 const char AAReturnedValues::ID = 0;
8180 const char AANoUnwind::ID = 0;
8181 const char AANoSync::ID = 0;
8182 const char AANoFree::ID = 0;
8183 const char AANonNull::ID = 0;
8184 const char AANoRecurse::ID = 0;
8185 const char AAWillReturn::ID = 0;
8186 const char AAUndefinedBehavior::ID = 0;
8187 const char AANoAlias::ID = 0;
8188 const char AAReachability::ID = 0;
8189 const char AANoReturn::ID = 0;
8190 const char AAIsDead::ID = 0;
8191 const char AADereferenceable::ID = 0;
8192 const char AAAlign::ID = 0;
8193 const char AANoCapture::ID = 0;
8194 const char AAValueSimplify::ID = 0;
8195 const char AAHeapToStack::ID = 0;
8196 const char AAPrivatizablePtr::ID = 0;
8197 const char AAMemoryBehavior::ID = 0;
8198 const char AAMemoryLocation::ID = 0;
8199 const char AAValueConstantRange::ID = 0;
8200 const char AAPotentialValues::ID = 0;
8201 const char AANoUndef::ID = 0;
8202 
8203 // Macro magic to create the static generator function for attributes that
8204 // follow the naming scheme.
8205 
8206 #define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
8207   case IRPosition::PK:                                                         \
8208     llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
8209 
8210 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
8211   case IRPosition::PK:                                                         \
8212     AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
8213     ++NumAAs;                                                                  \
8214     break;
8215 
8216 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
8217   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8218     CLASS *AA = nullptr;                                                       \
8219     switch (IRP.getPositionKind()) {                                           \
8220       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8221       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8222       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8223       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8224       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8225       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8226       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8227       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8228     }                                                                          \
8229     return *AA;                                                                \
8230   }
8231 
8232 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
8233   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8234     CLASS *AA = nullptr;                                                       \
8235     switch (IRP.getPositionKind()) {                                           \
8236       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8237       SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
8238       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8239       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8240       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8241       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8242       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8243       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8244     }                                                                          \
8245     return *AA;                                                                \
8246   }
8247 
8248 #define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
8249   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8250     CLASS *AA = nullptr;                                                       \
8251     switch (IRP.getPositionKind()) {                                           \
8252       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8253       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8254       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8255       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8256       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8257       SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
8258       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8259       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8260     }                                                                          \
8261     return *AA;                                                                \
8262   }
8263 
8264 #define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
8265   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8266     CLASS *AA = nullptr;                                                       \
8267     switch (IRP.getPositionKind()) {                                           \
8268       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8269       SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
8270       SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
8271       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8272       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
8273       SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
8274       SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
8275       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8276     }                                                                          \
8277     return *AA;                                                                \
8278   }
8279 
8280 #define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
8281   CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
8282     CLASS *AA = nullptr;                                                       \
8283     switch (IRP.getPositionKind()) {                                           \
8284       SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
8285       SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
8286       SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
8287       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
8288       SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
8289       SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
8290       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
8291       SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
8292     }                                                                          \
8293     return *AA;                                                                \
8294   }
8295 
8296 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
8297 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
8298 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
8299 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
8300 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
8301 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
8302 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)
8303 
8304 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
8305 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
8306 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
8307 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
8308 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
8309 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
8310 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
8311 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
8312 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)
8313 
8314 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
8315 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
8316 CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
8317 
8318 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
8319 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
8320 CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)
8321 
8322 CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
8323 
8324 #undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
8325 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
8326 #undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
8327 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
8328 #undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
8329 #undef SWITCH_PK_CREATE
8330 #undef SWITCH_PK_INV
8331