1 //===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // See the Attributor.h file comment and the class descriptions in that file for
10 // more information.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/IPO/Attributor.h"
15 
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/SmallPtrSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/Analysis/AliasAnalysis.h"
20 #include "llvm/Analysis/AssumeBundleQueries.h"
21 #include "llvm/Analysis/AssumptionCache.h"
22 #include "llvm/Analysis/CaptureTracking.h"
23 #include "llvm/Analysis/LazyValueInfo.h"
24 #include "llvm/Analysis/MemoryBuiltins.h"
25 #include "llvm/Analysis/ScalarEvolution.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/Instruction.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/NoFolder.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Transforms/IPO/ArgumentPromotion.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 
36 #include <cassert>
37 
38 using namespace llvm;
39 
40 #define DEBUG_TYPE "attributor"
41 
42 static cl::opt<bool> ManifestInternal(
43     "attributor-manifest-internal", cl::Hidden,
44     cl::desc("Manifest Attributor internal string attributes."),
45     cl::init(false));
46 
47 static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
48                                        cl::Hidden);
49 
50 template <>
51 unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;
52 
53 static cl::opt<unsigned, true> MaxPotentialValues(
54     "attributor-max-potential-values", cl::Hidden,
55     cl::desc("Maximum number of potential values to be "
56              "tracked for each position."),
57     cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
58     cl::init(7));
59 
60 STATISTIC(NumAAs, "Number of abstract attributes created");
61 
62 // Some helper macros to deal with statistics tracking.
63 //
64 // Usage:
65 // For simple IR attribute tracking overload trackStatistics in the abstract
66 // attribute and choose the right STATS_DECLTRACK_********* macro,
67 // e.g.,:
68 //  void trackStatistics() const override {
69 //    STATS_DECLTRACK_ARG_ATTR(returned)
70 //  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately, as
// sketched below.
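//
// As a sketch (illustrative only; "MyAttr" and "SomeCondition" are
// placeholders, not existing names):
//  void trackStatistics() const override {
//    STATS_DECL(MyAttr, Function, BUILD_STAT_MSG_IR_ATTR(functions, MyAttr))
//    if (SomeCondition)
//      STATS_TRACK(MyAttr, Function)
//  }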
74 //
75 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
76   ("Number of " #TYPE " marked '" #NAME "'")
77 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
78 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
79 #define STATS_DECL(NAME, TYPE, MSG)                                            \
80   STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
81 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
82 #define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
83   {                                                                            \
84     STATS_DECL(NAME, TYPE, MSG)                                                \
85     STATS_TRACK(NAME, TYPE)                                                    \
86   }
87 #define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
88   STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
89 #define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
90   STATS_DECLTRACK(NAME, CSArguments,                                           \
91                   BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
92 #define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
93   STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
94 #define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
95   STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
96 #define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
97   STATS_DECLTRACK(NAME, FunctionReturn,                                        \
98                   BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
99 #define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
100   STATS_DECLTRACK(NAME, CSReturn,                                              \
101                   BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
102 #define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
103   STATS_DECLTRACK(NAME, Floating,                                              \
104                   ("Number of floating values known to be '" #NAME "'"))
105 
// Specialization of the operator<< for abstract attribute subclasses. This
107 // disambiguates situations where multiple operators are applicable.
108 namespace llvm {
109 #define PIPE_OPERATOR(CLASS)                                                   \
110   raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
111     return OS << static_cast<const AbstractAttribute &>(AA);                   \
112   }
113 
114 PIPE_OPERATOR(AAIsDead)
115 PIPE_OPERATOR(AANoUnwind)
116 PIPE_OPERATOR(AANoSync)
117 PIPE_OPERATOR(AANoRecurse)
118 PIPE_OPERATOR(AAWillReturn)
119 PIPE_OPERATOR(AANoReturn)
120 PIPE_OPERATOR(AAReturnedValues)
121 PIPE_OPERATOR(AANonNull)
122 PIPE_OPERATOR(AANoAlias)
123 PIPE_OPERATOR(AADereferenceable)
124 PIPE_OPERATOR(AAAlign)
125 PIPE_OPERATOR(AANoCapture)
126 PIPE_OPERATOR(AAValueSimplify)
127 PIPE_OPERATOR(AANoFree)
128 PIPE_OPERATOR(AAHeapToStack)
129 PIPE_OPERATOR(AAReachability)
130 PIPE_OPERATOR(AAMemoryBehavior)
131 PIPE_OPERATOR(AAMemoryLocation)
132 PIPE_OPERATOR(AAValueConstantRange)
133 PIPE_OPERATOR(AAPrivatizablePtr)
134 PIPE_OPERATOR(AAUndefinedBehavior)
135 PIPE_OPERATOR(AAPotentialValues)
136 PIPE_OPERATOR(AANoUndef)
137 
138 #undef PIPE_OPERATOR
139 } // namespace llvm
140 
141 namespace {
142 
143 static Optional<ConstantInt *>
144 getAssumedConstantInt(Attributor &A, const Value &V,
145                       const AbstractAttribute &AA,
146                       bool &UsedAssumedInformation) {
147   Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
148   if (C.hasValue())
149     return dyn_cast_or_null<ConstantInt>(C.getValue());
150   return llvm::None;
151 }
152 
/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
156 static const Value *getPointerOperand(const Instruction *I,
157                                       bool AllowVolatile) {
158   if (!AllowVolatile && I->isVolatile())
159     return nullptr;
160 
161   if (auto *LI = dyn_cast<LoadInst>(I)) {
162     return LI->getPointerOperand();
163   }
164 
165   if (auto *SI = dyn_cast<StoreInst>(I)) {
166     return SI->getPointerOperand();
167   }
168 
169   if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
170     return CXI->getPointerOperand();
171   }
172 
173   if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
174     return RMWI->getPointerOperand();
175   }
176 
177   return nullptr;
178 }
179 
180 /// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
181 /// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
183 /// possible. If that fails, the remaining offset is adjusted byte-wise, hence
184 /// through a cast to i8*.
185 ///
/// TODO: This could probably live somewhere more prominently if it doesn't
187 ///       already exist.
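///
/// As an illustrative sketch (assuming the default struct layout), for a
/// pointer %p with element type { i32, { i64, i64 } } and Offset = 8 this
/// would emit roughly:
///   %p.0.1 = getelementptr { i32, { i64, i64 } }, { i32, { i64, i64 } }* %p,
///            i32 0, i32 1
/// followed by a cast of the result to the requested type \p ResTy.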
188 static Value *constructPointer(Type *ResTy, Type *PtrElemTy, Value *Ptr,
189                                int64_t Offset, IRBuilder<NoFolder> &IRB,
190                                const DataLayout &DL) {
191   assert(Offset >= 0 && "Negative offset not supported yet!");
192   LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
193                     << "-bytes as " << *ResTy << "\n");
194 
195   if (Offset) {
196     SmallVector<Value *, 4> Indices;
197     std::string GEPName = Ptr->getName().str() + ".0";
198 
199     // Add 0 index to look through the pointer.
200     assert((uint64_t)Offset < DL.getTypeAllocSize(PtrElemTy) &&
201            "Offset out of bounds");
202     Indices.push_back(Constant::getNullValue(IRB.getInt32Ty()));
203 
204     Type *Ty = PtrElemTy;
205     do {
206       auto *STy = dyn_cast<StructType>(Ty);
207       if (!STy)
208         // Non-aggregate type, we cast and make byte-wise progress now.
209         break;
210 
211       const StructLayout *SL = DL.getStructLayout(STy);
212       if (int64_t(SL->getSizeInBytes()) < Offset)
213         break;
214 
215       uint64_t Idx = SL->getElementContainingOffset(Offset);
216       assert(Idx < STy->getNumElements() && "Offset calculation error!");
217       uint64_t Rem = Offset - SL->getElementOffset(Idx);
218       Ty = STy->getElementType(Idx);
219 
220       LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
221                         << " Idx: " << Idx << " Rem: " << Rem << "\n");
222 
223       GEPName += "." + std::to_string(Idx);
224       Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
225       Offset = Rem;
226     } while (Offset);
227 
228     // Create a GEP for the indices collected above.
229     Ptr = IRB.CreateGEP(PtrElemTy, Ptr, Indices, GEPName);
230 
231     // If an offset is left we use byte-wise adjustment.
232     if (Offset) {
233       Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
234       Ptr = IRB.CreateGEP(IRB.getInt8Ty(), Ptr, IRB.getInt32(Offset),
235                           GEPName + ".b" + Twine(Offset));
236     }
237   }
238 
239   // Ensure the result has the requested type.
240   Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");
241 
242   LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
243   return Ptr;
244 }
245 
246 /// Recursively visit all values that might become \p IRP at some point. This
247 /// will be done by looking through cast instructions, selects, phis, and calls
248 /// with the "returned" attribute. Once we cannot look through the value any
249 /// further, the callback \p VisitValueCB is invoked and passed the current
250 /// value, the \p State, and a flag to indicate if we stripped anything.
251 /// Stripped means that we unpacked the value associated with \p IRP at least
252 /// once. Note that the value used for the callback may still be the value
253 /// associated with \p IRP (due to PHIs). To limit how much effort is invested,
254 /// we will never visit more values than specified by \p MaxValues.
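///
/// A minimal sketch of a visit callback (illustrative only; the state type and
/// what is recorded depend on the caller):
///   auto VisitValueCB = [](Value &V, const Instruction *CtxI,
///                          BooleanState &S, bool Stripped) -> bool {
///     if (isa<Constant>(V))
///       return true;                 // Keep visiting other worklist items.
///     S.indicatePessimisticFixpoint();
///     return false;                  // Abort the traversal.
///   };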
255 template <typename AAType, typename StateTy>
256 static bool genericValueTraversal(
257     Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
258     function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
259         VisitValueCB,
260     const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
261     function_ref<Value *(Value *)> StripCB = nullptr) {
262 
263   const AAIsDead *LivenessAA = nullptr;
264   if (IRP.getAnchorScope())
265     LivenessAA = &A.getAAFor<AAIsDead>(
266         QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
267         DepClassTy::NONE);
268   bool AnyDead = false;
269 
270   using Item = std::pair<Value *, const Instruction *>;
271   SmallSet<Item, 16> Visited;
272   SmallVector<Item, 16> Worklist;
273   Worklist.push_back({&IRP.getAssociatedValue(), CtxI});
274 
275   int Iteration = 0;
276   do {
277     Item I = Worklist.pop_back_val();
278     Value *V = I.first;
279     CtxI = I.second;
280     if (StripCB)
281       V = StripCB(V);
282 
    // Check if we should process the current value. To prevent endless
    // recursion, keep a record of the values we followed!
285     if (!Visited.insert(I).second)
286       continue;
287 
288     // Make sure we limit the compile time for complex expressions.
289     if (Iteration++ >= MaxValues)
290       return false;
291 
    // Explicitly look through calls with a "returned" argument attribute if
    // the value is not a pointer, as stripPointerCasts only works on pointers.
294     Value *NewV = nullptr;
295     if (V->getType()->isPointerTy()) {
296       NewV = V->stripPointerCasts();
297     } else {
298       auto *CB = dyn_cast<CallBase>(V);
299       if (CB && CB->getCalledFunction()) {
300         for (Argument &Arg : CB->getCalledFunction()->args())
301           if (Arg.hasReturnedAttr()) {
302             NewV = CB->getArgOperand(Arg.getArgNo());
303             break;
304           }
305       }
306     }
307     if (NewV && NewV != V) {
308       Worklist.push_back({NewV, CtxI});
309       continue;
310     }
311 
312     // Look through select instructions, visit both potential values.
313     if (auto *SI = dyn_cast<SelectInst>(V)) {
314       Worklist.push_back({SI->getTrueValue(), CtxI});
315       Worklist.push_back({SI->getFalseValue(), CtxI});
316       continue;
317     }
318 
319     // Look through phi nodes, visit all live operands.
320     if (auto *PHI = dyn_cast<PHINode>(V)) {
321       assert(LivenessAA &&
322              "Expected liveness in the presence of instructions!");
323       for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
324         BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
325         if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
326                             LivenessAA,
327                             /* CheckBBLivenessOnly */ true)) {
328           AnyDead = true;
329           continue;
330         }
331         Worklist.push_back(
332             {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
333       }
334       continue;
335     }
336 
337     if (UseValueSimplify && !isa<Constant>(V)) {
338       bool UsedAssumedInformation = false;
339       Optional<Constant *> C =
340           A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
341       if (!C.hasValue())
342         continue;
343       if (Value *NewV = C.getValue()) {
344         Worklist.push_back({NewV, CtxI});
345         continue;
346       }
347     }
348 
349     // Once a leaf is reached we inform the user through the callback.
350     if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
351       return false;
352   } while (!Worklist.empty());
353 
  // If we actually used liveness information, we have to record a dependence.
355   if (AnyDead)
356     A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);
357 
358   // All values have been visited.
359   return true;
360 }
361 
362 const Value *stripAndAccumulateMinimalOffsets(
363     Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
364     const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
365     bool UseAssumed = false) {
366 
367   auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
368     const IRPosition &Pos = IRPosition::value(V);
369     // Only track dependence if we are going to use the assumed info.
370     const AAValueConstantRange &ValueConstantRangeAA =
371         A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
372                                          UseAssumed ? DepClassTy::OPTIONAL
373                                                     : DepClassTy::NONE);
374     ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
375                                      : ValueConstantRangeAA.getKnown();
376     // We can only use the lower part of the range because the upper part can
377     // be higher than what the value can really be.
378     ROffset = Range.getSignedMin();
379     return true;
380   };
381 
382   return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
383                                                 AttributorAnalysis);
384 }
385 
386 static const Value *getMinimalBaseOfAccsesPointerOperand(
387     Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
388     int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
389   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
390   if (!Ptr)
391     return nullptr;
392   APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
393   const Value *Base = stripAndAccumulateMinimalOffsets(
394       A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);
395 
396   BytesOffset = OffsetAPInt.getSExtValue();
397   return Base;
398 }
399 
400 static const Value *
401 getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
402                                      const DataLayout &DL,
403                                      bool AllowNonInbounds = false) {
404   const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
405   if (!Ptr)
406     return nullptr;
407 
408   return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
409                                           AllowNonInbounds);
410 }
411 
412 /// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (i.e., if an
/// update is required to be run again).
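///
/// A typical use, as in the deduction helpers below, is e.g.:
///   return clampStateAndIndicateChange(getState(), FnAA.getState());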
415 template <typename StateType>
416 ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
417   auto Assumed = S.getAssumed();
418   S ^= R;
419   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
420                                    : ChangeStatus::CHANGED;
421 }
422 
423 /// Clamp the information known for all returned values of a function
424 /// (identified by \p QueryingAA) into \p S.
425 template <typename AAType, typename StateType = typename AAType::StateType>
426 static void clampReturnedValueStates(
427     Attributor &A, const AAType &QueryingAA, StateType &S,
428     const IRPosition::CallBaseContext *CBContext = nullptr) {
429   LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
430                     << QueryingAA << " into " << S << "\n");
431 
432   assert((QueryingAA.getIRPosition().getPositionKind() ==
433               IRPosition::IRP_RETURNED ||
434           QueryingAA.getIRPosition().getPositionKind() ==
435               IRPosition::IRP_CALL_SITE_RETURNED) &&
436          "Can only clamp returned value states for a function returned or call "
437          "site returned position!");
438 
  // Use an optional state as there might not be any return values, and we want
  // to join (IntegerState::operator&) the states of all return values we find.
441   Optional<StateType> T;
442 
443   // Callback for each possibly returned value.
444   auto CheckReturnValue = [&](Value &RV) -> bool {
445     const IRPosition &RVPos = IRPosition::value(RV, CBContext);
446     const AAType &AA =
447         A.getAAFor<AAType>(QueryingAA, RVPos, DepClassTy::REQUIRED);
448     LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
449                       << " @ " << RVPos << "\n");
450     const StateType &AAS = AA.getState();
451     if (T.hasValue())
452       *T &= AAS;
453     else
454       T = AAS;
455     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
456                       << "\n");
457     return T->isValidState();
458   };
459 
460   if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
461     S.indicatePessimisticFixpoint();
462   else if (T.hasValue())
463     S ^= *T;
464 }
465 
466 /// Helper class for generic deduction: return value -> returned position.
467 template <typename AAType, typename BaseType,
468           typename StateType = typename BaseType::StateType,
469           bool PropagateCallBaseContext = false>
470 struct AAReturnedFromReturnedValues : public BaseType {
471   AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
472       : BaseType(IRP, A) {}
473 
474   /// See AbstractAttribute::updateImpl(...).
475   ChangeStatus updateImpl(Attributor &A) override {
476     StateType S(StateType::getBestState(this->getState()));
477     clampReturnedValueStates<AAType, StateType>(
478         A, *this, S,
479         PropagateCallBaseContext ? this->getCallBaseContext() : nullptr);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
482     return clampStateAndIndicateChange<StateType>(this->getState(), S);
483   }
484 };
485 
486 /// Clamp the information known at all call sites for a given argument
487 /// (identified by \p QueryingAA) into \p S.
488 template <typename AAType, typename StateType = typename AAType::StateType>
489 static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
490                                         StateType &S) {
491   LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
492                     << QueryingAA << " into " << S << "\n");
493 
494   assert(QueryingAA.getIRPosition().getPositionKind() ==
495              IRPosition::IRP_ARGUMENT &&
496          "Can only clamp call site argument states for an argument position!");
497 
  // Use an optional state as there might not be any call sites, and we want to
  // join (IntegerState::operator&) the states of all call site arguments there
  // are.
500   Optional<StateType> T;
501 
502   // The argument number which is also the call site argument number.
503   unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();
504 
505   auto CallSiteCheck = [&](AbstractCallSite ACS) {
506     const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
509     if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
510       return false;
511 
512     const AAType &AA =
513         A.getAAFor<AAType>(QueryingAA, ACSArgPos, DepClassTy::REQUIRED);
514     LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
515                       << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
516     const StateType &AAS = AA.getState();
517     if (T.hasValue())
518       *T &= AAS;
519     else
520       T = AAS;
521     LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
522                       << "\n");
523     return T->isValidState();
524   };
525 
526   bool AllCallSitesKnown;
527   if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
528                               AllCallSitesKnown))
529     S.indicatePessimisticFixpoint();
530   else if (T.hasValue())
531     S ^= *T;
532 }
533 
534 /// This function is the bridge between argument position and the call base
535 /// context.
536 template <typename AAType, typename BaseType,
537           typename StateType = typename AAType::StateType>
538 bool getArgumentStateFromCallBaseContext(Attributor &A,
539                                          BaseType &QueryingAttribute,
540                                          IRPosition &Pos, StateType &State) {
541   assert((Pos.getPositionKind() == IRPosition::IRP_ARGUMENT) &&
542          "Expected an 'argument' position !");
543   const CallBase *CBContext = Pos.getCallBaseContext();
544   if (!CBContext)
545     return false;
546 
547   int ArgNo = Pos.getCallSiteArgNo();
548   assert(ArgNo >= 0 && "Invalid Arg No!");
549 
550   const auto &AA = A.getAAFor<AAType>(
551       QueryingAttribute, IRPosition::callsite_argument(*CBContext, ArgNo),
552       DepClassTy::REQUIRED);
553   const StateType &CBArgumentState =
554       static_cast<const StateType &>(AA.getState());
555 
556   LLVM_DEBUG(dbgs() << "[Attributor] Briding Call site context to argument"
557                     << "Position:" << Pos << "CB Arg state:" << CBArgumentState
558                     << "\n");
559 
560   // NOTE: If we want to do call site grouping it should happen here.
561   State ^= CBArgumentState;
562   return true;
563 }
564 
565 /// Helper class for generic deduction: call site argument -> argument position.
566 template <typename AAType, typename BaseType,
567           typename StateType = typename AAType::StateType,
568           bool BridgeCallBaseContext = false>
569 struct AAArgumentFromCallSiteArguments : public BaseType {
570   AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
571       : BaseType(IRP, A) {}
572 
573   /// See AbstractAttribute::updateImpl(...).
574   ChangeStatus updateImpl(Attributor &A) override {
575     StateType S = StateType::getBestState(this->getState());
576 
577     if (BridgeCallBaseContext) {
578       bool Success =
579           getArgumentStateFromCallBaseContext<AAType, BaseType, StateType>(
580               A, *this, this->getIRPosition(), S);
581       if (Success)
582         return clampStateAndIndicateChange<StateType>(this->getState(), S);
583     }
584     clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
585 
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
588     return clampStateAndIndicateChange<StateType>(this->getState(), S);
589   }
590 };
591 
592 /// Helper class for generic replication: function returned -> cs returned.
593 template <typename AAType, typename BaseType,
594           typename StateType = typename BaseType::StateType,
595           bool IntroduceCallBaseContext = false>
596 struct AACallSiteReturnedFromReturned : public BaseType {
597   AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
598       : BaseType(IRP, A) {}
599 
600   /// See AbstractAttribute::updateImpl(...).
601   ChangeStatus updateImpl(Attributor &A) override {
602     assert(this->getIRPosition().getPositionKind() ==
603                IRPosition::IRP_CALL_SITE_RETURNED &&
604            "Can only wrap function returned positions for call site returned "
605            "positions!");
606     auto &S = this->getState();
607 
608     const Function *AssociatedFunction =
609         this->getIRPosition().getAssociatedFunction();
610     if (!AssociatedFunction)
611       return S.indicatePessimisticFixpoint();
612 
613     CallBase &CBContext = static_cast<CallBase &>(this->getAnchorValue());
614     if (IntroduceCallBaseContext)
615       LLVM_DEBUG(dbgs() << "[Attributor] Introducing call base context:"
616                         << CBContext << "\n");
617 
618     IRPosition FnPos = IRPosition::returned(
619         *AssociatedFunction, IntroduceCallBaseContext ? &CBContext : nullptr);
620     const AAType &AA = A.getAAFor<AAType>(*this, FnPos, DepClassTy::REQUIRED);
621     return clampStateAndIndicateChange(S, AA.getState());
622   }
623 };
624 
625 /// Helper function to accumulate uses.
626 template <class AAType, typename StateType = typename AAType::StateType>
627 static void followUsesInContext(AAType &AA, Attributor &A,
628                                 MustBeExecutedContextExplorer &Explorer,
629                                 const Instruction *CtxI,
630                                 SetVector<const Use *> &Uses,
631                                 StateType &State) {
632   auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
633   for (unsigned u = 0; u < Uses.size(); ++u) {
634     const Use *U = Uses[u];
635     if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
636       bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
637       if (Found && AA.followUseInMBEC(A, U, UserI, State))
638         for (const Use &Us : UserI->uses())
639           Uses.insert(&Us);
640     }
641   }
642 }
643 
644 /// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
///                      StateType &State)
/// U - Underlying use.
/// I - The user of the \p U.
/// State - The state to be updated based on the use.
/// Returns true if the value should be tracked transitively.
652 ///
653 template <class AAType, typename StateType = typename AAType::StateType>
654 static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
655                              Instruction &CtxI) {
656 
657   // Container for (transitive) uses of the associated value.
658   SetVector<const Use *> Uses;
659   for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
660     Uses.insert(&U);
661 
662   MustBeExecutedContextExplorer &Explorer =
663       A.getInfoCache().getMustBeExecutedContextExplorer();
664 
665   followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);
666 
667   if (S.isAtFixpoint())
668     return;
669 
670   SmallVector<const BranchInst *, 4> BrInsts;
671   auto Pred = [&](const Instruction *I) {
672     if (const BranchInst *Br = dyn_cast<BranchInst>(I))
673       if (Br->isConditional())
674         BrInsts.push_back(Br);
675     return true;
676   };
677 
  // Here, we accumulate the conditional branch instructions in the context.
  // For each such branch we explore its successor paths and collect their
  // known states. The conjunction of the child states forms the parent state
  // of the branch, and the disjunction of all parent states is merged into the
  // known state. Let ParentS_i be the state known for the i-th branch
  // instruction in the context; ChildS_{i, j} is created for its j-th
  // successor.
683   //
684   // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
685   // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
686   //      ...
687   // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
688   //
689   // Known State |= ParentS_1 \/ ParentS_2 \/... \/ ParentS_m
690   //
691   // FIXME: Currently, recursive branches are not handled. For example, we
692   // can't deduce that ptr must be dereferenced in below function.
693   //
  // void f(int a, int b, int *ptr) {
  //    if (a)
696   //      if (b) {
697   //        *ptr = 0;
698   //      } else {
699   //        *ptr = 1;
700   //      }
701   //    else {
702   //      if (b) {
703   //        *ptr = 0;
704   //      } else {
705   //        *ptr = 1;
706   //      }
707   //    }
708   // }
709 
710   Explorer.checkForAllContext(&CtxI, Pred);
711   for (const BranchInst *Br : BrInsts) {
712     StateType ParentState;
713 
    // The known state of the parent is a conjunction of the children's known
    // states, so it is initialized with the best state.
716     ParentState.indicateOptimisticFixpoint();
717 
718     for (const BasicBlock *BB : Br->successors()) {
719       StateType ChildState;
720 
721       size_t BeforeSize = Uses.size();
722       followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);
723 
724       // Erase uses which only appear in the child.
725       for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
726         It = Uses.erase(It);
727 
728       ParentState &= ChildState;
729     }
730 
731     // Use only known state.
732     S += ParentState;
733   }
734 }
735 
736 /// -----------------------NoUnwind Function Attribute--------------------------
737 
738 struct AANoUnwindImpl : AANoUnwind {
739   AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}
740 
741   const std::string getAsStr() const override {
742     return getAssumed() ? "nounwind" : "may-unwind";
743   }
744 
745   /// See AbstractAttribute::updateImpl(...).
746   ChangeStatus updateImpl(Attributor &A) override {
747     auto Opcodes = {
748         (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
749         (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
750         (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};
751 
752     auto CheckForNoUnwind = [&](Instruction &I) {
753       if (!I.mayThrow())
754         return true;
755 
756       if (const auto *CB = dyn_cast<CallBase>(&I)) {
757         const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
758             *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
759         return NoUnwindAA.isAssumedNoUnwind();
760       }
761       return false;
762     };
763 
764     if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
765       return indicatePessimisticFixpoint();
766 
767     return ChangeStatus::UNCHANGED;
768   }
769 };
770 
771 struct AANoUnwindFunction final : public AANoUnwindImpl {
772   AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
773       : AANoUnwindImpl(IRP, A) {}
774 
775   /// See AbstractAttribute::trackStatistics()
776   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
777 };
778 
/// NoUnwind attribute deduction for a call site.
780 struct AANoUnwindCallSite final : AANoUnwindImpl {
781   AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
782       : AANoUnwindImpl(IRP, A) {}
783 
784   /// See AbstractAttribute::initialize(...).
785   void initialize(Attributor &A) override {
786     AANoUnwindImpl::initialize(A);
787     Function *F = getAssociatedFunction();
788     if (!F || F->isDeclaration())
789       indicatePessimisticFixpoint();
790   }
791 
792   /// See AbstractAttribute::updateImpl(...).
793   ChangeStatus updateImpl(Attributor &A) override {
794     // TODO: Once we have call site specific value information we can provide
795     //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
797     //       redirecting requests to the callee argument.
798     Function *F = getAssociatedFunction();
799     const IRPosition &FnPos = IRPosition::function(*F);
800     auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
801     return clampStateAndIndicateChange(getState(), FnAA.getState());
802   }
803 
804   /// See AbstractAttribute::trackStatistics()
805   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
806 };
807 
808 /// --------------------- Function Return Values -------------------------------
809 
810 /// "Attribute" that collects all potential returned values and the return
811 /// instructions that they arise from.
812 ///
813 /// If there is a unique returned value R, the manifest method will:
814 ///   - mark R with the "returned" attribute, if R is an argument.
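///
/// For example (illustrative IR), for
///   define i32 @f(i32 %x) { ret i32 %x }
/// the argument would be marked "returned":
///   define i32 @f(i32 returned %x) { ret i32 %x }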
815 class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {
816 
817   /// Mapping of values potentially returned by the associated function to the
818   /// return instructions that might return them.
819   MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;
820 
821   /// Mapping to remember the number of returned values for a call site such
822   /// that we can avoid updates if nothing changed.
823   DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;
824 
825   /// Set of unresolved calls returned by the associated function.
826   SmallSetVector<CallBase *, 4> UnresolvedCalls;
827 
828   /// State flags
829   ///
830   ///{
831   bool IsFixed = false;
832   bool IsValidState = true;
833   ///}
834 
835 public:
836   AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
837       : AAReturnedValues(IRP, A) {}
838 
839   /// See AbstractAttribute::initialize(...).
840   void initialize(Attributor &A) override {
841     // Reset the state.
842     IsFixed = false;
843     IsValidState = true;
844     ReturnedValues.clear();
845 
846     Function *F = getAssociatedFunction();
847     if (!F || F->isDeclaration()) {
848       indicatePessimisticFixpoint();
849       return;
850     }
851     assert(!F->getReturnType()->isVoidTy() &&
852            "Did not expect a void return type!");
853 
854     // The map from instruction opcodes to those instructions in the function.
855     auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);
856 
857     // Look through all arguments, if one is marked as returned we are done.
858     for (Argument &Arg : F->args()) {
859       if (Arg.hasReturnedAttr()) {
860         auto &ReturnInstSet = ReturnedValues[&Arg];
861         if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
862           for (Instruction *RI : *Insts)
863             ReturnInstSet.insert(cast<ReturnInst>(RI));
864 
865         indicateOptimisticFixpoint();
866         return;
867       }
868     }
869 
870     if (!A.isFunctionIPOAmendable(*F))
871       indicatePessimisticFixpoint();
872   }
873 
874   /// See AbstractAttribute::manifest(...).
875   ChangeStatus manifest(Attributor &A) override;
876 
877   /// See AbstractAttribute::getState(...).
878   AbstractState &getState() override { return *this; }
879 
880   /// See AbstractAttribute::getState(...).
881   const AbstractState &getState() const override { return *this; }
882 
883   /// See AbstractAttribute::updateImpl(Attributor &A).
884   ChangeStatus updateImpl(Attributor &A) override;
885 
886   llvm::iterator_range<iterator> returned_values() override {
887     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
888   }
889 
890   llvm::iterator_range<const_iterator> returned_values() const override {
891     return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
892   }
893 
894   const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
895     return UnresolvedCalls;
896   }
897 
898   /// Return the number of potential return values, -1 if unknown.
899   size_t getNumReturnValues() const override {
900     return isValidState() ? ReturnedValues.size() : -1;
901   }
902 
903   /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return
  /// llvm::None.
906   Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;
907 
908   /// See AbstractState::checkForAllReturnedValues(...).
909   bool checkForAllReturnedValuesAndReturnInsts(
910       function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
911       const override;
912 
913   /// Pretty print the attribute similar to the IR representation.
914   const std::string getAsStr() const override;
915 
916   /// See AbstractState::isAtFixpoint().
917   bool isAtFixpoint() const override { return IsFixed; }
918 
919   /// See AbstractState::isValidState().
920   bool isValidState() const override { return IsValidState; }
921 
922   /// See AbstractState::indicateOptimisticFixpoint(...).
923   ChangeStatus indicateOptimisticFixpoint() override {
924     IsFixed = true;
925     return ChangeStatus::UNCHANGED;
926   }
927 
928   ChangeStatus indicatePessimisticFixpoint() override {
929     IsFixed = true;
930     IsValidState = false;
931     return ChangeStatus::CHANGED;
932   }
933 };
934 
935 ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
936   ChangeStatus Changed = ChangeStatus::UNCHANGED;
937 
938   // Bookkeeping.
939   assert(isValidState());
940   STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
941                   "Number of function with known return values");
942 
943   // Check if we have an assumed unique return value that we could manifest.
944   Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);
945 
946   if (!UniqueRV.hasValue() || !UniqueRV.getValue())
947     return Changed;
948 
949   // Bookkeeping.
950   STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
951                   "Number of function with unique return");
952 
953   // Callback to replace the uses of CB with the constant C.
954   auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
955     if (CB.use_empty())
956       return ChangeStatus::UNCHANGED;
957     if (A.changeValueAfterManifest(CB, C))
958       return ChangeStatus::CHANGED;
959     return ChangeStatus::UNCHANGED;
960   };
961 
962   // If the assumed unique return value is an argument, annotate it.
963   if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
964     if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
965             getAssociatedFunction()->getReturnType())) {
966       getIRPosition() = IRPosition::argument(*UniqueRVArg);
967       Changed = IRAttribute::manifest(A);
968     }
969   } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
970     // We can replace the returned value with the unique returned constant.
971     Value &AnchorValue = getAnchorValue();
972     if (Function *F = dyn_cast<Function>(&AnchorValue)) {
973       for (const Use &U : F->uses())
974         if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
975           if (CB->isCallee(&U)) {
976             Constant *RVCCast =
977                 CB->getType() == RVC->getType()
978                     ? RVC
979                     : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
980             Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
981           }
982     } else {
983       assert(isa<CallBase>(AnchorValue) &&
984              "Expcected a function or call base anchor!");
985       Constant *RVCCast =
986           AnchorValue.getType() == RVC->getType()
987               ? RVC
988               : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
989       Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
990     }
991     if (Changed == ChangeStatus::CHANGED)
992       STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
993                       "Number of function returns replaced by constant return");
994   }
995 
996   return Changed;
997 }
998 
999 const std::string AAReturnedValuesImpl::getAsStr() const {
1000   return (isAtFixpoint() ? "returns(#" : "may-return(#") +
1001          (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
1002          ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
1003 }
1004 
1005 Optional<Value *>
1006 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
1007   // If checkForAllReturnedValues provides a unique value, ignoring potential
1008   // undef values that can also be present, it is assumed to be the actual
1009   // return value and forwarded to the caller of this method. If there are
1010   // multiple, a nullptr is returned indicating there cannot be a unique
1011   // returned value.
1012   Optional<Value *> UniqueRV;
1013 
1014   auto Pred = [&](Value &RV) -> bool {
1015     // If we found a second returned value and neither the current nor the saved
1016     // one is an undef, there is no unique returned value. Undefs are special
1017     // since we can pretend they have any value.
1018     if (UniqueRV.hasValue() && UniqueRV != &RV &&
1019         !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
1020       UniqueRV = nullptr;
1021       return false;
1022     }
1023 
1024     // Do not overwrite a value with an undef.
1025     if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
1026       UniqueRV = &RV;
1027 
1028     return true;
1029   };
1030 
1031   if (!A.checkForAllReturnedValues(Pred, *this))
1032     UniqueRV = nullptr;
1033 
1034   return UniqueRV;
1035 }
1036 
1037 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
1038     function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
1039     const {
1040   if (!isValidState())
1041     return false;
1042 
1043   // Check all returned values but ignore call sites as long as we have not
1044   // encountered an overdefined one during an update.
1045   for (auto &It : ReturnedValues) {
1046     Value *RV = It.first;
1047 
1048     CallBase *CB = dyn_cast<CallBase>(RV);
1049     if (CB && !UnresolvedCalls.count(CB))
1050       continue;
1051 
1052     if (!Pred(*RV, It.second))
1053       return false;
1054   }
1055 
1056   return true;
1057 }
1058 
1059 ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
1060   size_t NumUnresolvedCalls = UnresolvedCalls.size();
1061   bool Changed = false;
1062 
1063   // State used in the value traversals starting in returned values.
1064   struct RVState {
1065     // The map in which we collect return values -> return instrs.
1066     decltype(ReturnedValues) &RetValsMap;
1067     // The flag to indicate a change.
1068     bool &Changed;
1069     // The return instrs we come from.
1070     SmallSetVector<ReturnInst *, 4> RetInsts;
1071   };
1072 
1073   // Callback for a leaf value returned by the associated function.
1074   auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
1075                          bool) -> bool {
1076     auto Size = RVS.RetValsMap[&Val].size();
1077     RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
1078     bool Inserted = RVS.RetValsMap[&Val].size() != Size;
1079     RVS.Changed |= Inserted;
1080     LLVM_DEBUG({
1081       if (Inserted)
1082         dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
1083                << " => " << RVS.RetInsts.size() << "\n";
1084     });
1085     return true;
1086   };
1087 
1088   // Helper method to invoke the generic value traversal.
1089   auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
1090                                 const Instruction *CtxI) {
1091     IRPosition RetValPos = IRPosition::value(RV);
1092     return genericValueTraversal<AAReturnedValues, RVState>(
1093         A, RetValPos, *this, RVS, VisitValueCB, CtxI,
1094         /* UseValueSimplify */ false);
1095   };
1096 
  // Callback for all "return instructions" live in the associated function.
1098   auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
1099     ReturnInst &Ret = cast<ReturnInst>(I);
1100     RVState RVS({ReturnedValues, Changed, {}});
1101     RVS.RetInsts.insert(&Ret);
1102     return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
1103   };
1104 
  // Start by discovering returned values from all live return instructions in
  // the associated function.
1107   if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
1108     return indicatePessimisticFixpoint();
1109 
1110   // Once returned values "directly" present in the code are handled we try to
1111   // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it, we keep a record of potential new entries in a
  // separate map, NewRVsMap.
1114   decltype(ReturnedValues) NewRVsMap;
1115 
1116   auto HandleReturnValue = [&](Value *RV,
1117                                SmallSetVector<ReturnInst *, 4> &RIs) {
1118     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
1119                       << RIs.size() << " RIs\n");
1120     CallBase *CB = dyn_cast<CallBase>(RV);
1121     if (!CB || UnresolvedCalls.count(CB))
1122       return;
1123 
1124     if (!CB->getCalledFunction()) {
1125       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1126                         << "\n");
1127       UnresolvedCalls.insert(CB);
1128       return;
1129     }
1130 
1131     // TODO: use the function scope once we have call site AAReturnedValues.
1132     const auto &RetValAA = A.getAAFor<AAReturnedValues>(
1133         *this, IRPosition::function(*CB->getCalledFunction()),
1134         DepClassTy::REQUIRED);
1135     LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1136                       << RetValAA << "\n");
1137 
1138     // Skip dead ends, thus if we do not know anything about the returned
1139     // call we mark it as unresolved and it will stay that way.
1140     if (!RetValAA.getState().isValidState()) {
1141       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1142                         << "\n");
1143       UnresolvedCalls.insert(CB);
1144       return;
1145     }
1146 
1147     // Do not try to learn partial information. If the callee has unresolved
1148     // return values we will treat the call as unresolved/opaque.
1149     auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
1150     if (!RetValAAUnresolvedCalls.empty()) {
1151       UnresolvedCalls.insert(CB);
1152       return;
1153     }
1154 
    // Now check if we can track transitively returned values. If possible,
    // that is, if all return values can be represented in the current scope,
    // do so.
1157     bool Unresolved = false;
1158     for (auto &RetValAAIt : RetValAA.returned_values()) {
1159       Value *RetVal = RetValAAIt.first;
1160       if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
1161           isa<Constant>(RetVal))
1162         continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // so mark the call as unresolved.
1165       LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1166                            "cannot be translated: "
1167                         << *RetVal << "\n");
1168       UnresolvedCalls.insert(CB);
1169       Unresolved = true;
1170       break;
1171     }
1172 
1173     if (Unresolved)
1174       return;
1175 
1176     // Now track transitively returned values.
1177     unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
1178     if (NumRetAA == RetValAA.getNumReturnValues()) {
1179       LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1180                            "changed since it was seen last\n");
1181       return;
1182     }
1183     NumRetAA = RetValAA.getNumReturnValues();
1184 
1185     for (auto &RetValAAIt : RetValAA.returned_values()) {
1186       Value *RetVal = RetValAAIt.first;
1187       if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
1188         // Arguments are mapped to call site operands and we begin the traversal
1189         // again.
1190         bool Unused = false;
1191         RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
1192         VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
1193         continue;
1194       }
1195       if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time; there is
        // nothing for us to do here.
1198         continue;
1199       }
1200       if (isa<Constant>(RetVal)) {
1201         // Constants are valid everywhere, we can simply take them.
1202         NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
1203         continue;
1204       }
1205     }
1206   };
1207 
1208   for (auto &It : ReturnedValues)
1209     HandleReturnValue(It.first, It.second);
1210 
  // Because processing the new information can again lead to new return values
  // we have to be careful and iterate until this update is complete. The idea
  // is that we are in a stable state at the end of an update. All return
  // values have been handled and properly categorized. We might not update
  // again if we have not requested a non-fix attribute, so we cannot "wait"
  // for the next update to analyze a new return value.
1217   while (!NewRVsMap.empty()) {
1218     auto It = std::move(NewRVsMap.back());
1219     NewRVsMap.pop_back();
1220 
1221     assert(!It.second.empty() && "Entry does not add anything.");
1222     auto &ReturnInsts = ReturnedValues[It.first];
1223     for (ReturnInst *RI : It.second)
1224       if (ReturnInsts.insert(RI)) {
1225         LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1226                           << *It.first << " => " << *RI << "\n");
1227         HandleReturnValue(It.first, ReturnInsts);
1228         Changed = true;
1229       }
1230   }
1231 
1232   Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
1233   return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
1234 }
1235 
1236 struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
1237   AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
1238       : AAReturnedValuesImpl(IRP, A) {}
1239 
1240   /// See AbstractAttribute::trackStatistics()
1241   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
1242 };
1243 
/// Returned values information for a call site.
1245 struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
1246   AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
1247       : AAReturnedValuesImpl(IRP, A) {}
1248 
1249   /// See AbstractAttribute::initialize(...).
1250   void initialize(Attributor &A) override {
1251     // TODO: Once we have call site specific value information we can provide
1252     //       call site specific liveness information and then it makes
1253     //       sense to specialize attributes for call sites instead of
1254     //       redirecting requests to the callee.
1255     llvm_unreachable("Abstract attributes for returned values are not "
1256                      "supported for call sites yet!");
1257   }
1258 
1259   /// See AbstractAttribute::updateImpl(...).
1260   ChangeStatus updateImpl(Attributor &A) override {
1261     return indicatePessimisticFixpoint();
1262   }
1263 
1264   /// See AbstractAttribute::trackStatistics()
1265   void trackStatistics() const override {}
1266 };
1267 
1268 /// ------------------------ NoSync Function Attribute -------------------------
1269 
1270 struct AANoSyncImpl : AANoSync {
1271   AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}
1272 
1273   const std::string getAsStr() const override {
1274     return getAssumed() ? "nosync" : "may-sync";
1275   }
1276 
1277   /// See AbstractAttribute::updateImpl(...).
1278   ChangeStatus updateImpl(Attributor &A) override;
1279 
  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, i.e., an atomic instruction whose ordering is neither unordered
  /// nor monotonic.
1283   static bool isNonRelaxedAtomic(Instruction *I);
1284 
  /// Helper function specific for intrinsics which are potentially volatile.
1286   static bool isNoSyncIntrinsic(Instruction *I);
1287 };
1288 
1289 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
1290   if (!I->isAtomic())
1291     return false;
1292 
1293   if (auto *FI = dyn_cast<FenceInst>(I))
1294     // All legal orderings for fence are stronger than monotonic.
1295     return FI->getSyncScopeID() != SyncScope::SingleThread;
1296   else if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) {
1297     // Unordered is not a legal ordering for cmpxchg.
1298     return (AI->getSuccessOrdering() != AtomicOrdering::Monotonic ||
1299             AI->getFailureOrdering() != AtomicOrdering::Monotonic);
1300   }
1301 
1302   AtomicOrdering Ordering;
1303   switch (I->getOpcode()) {
1304   case Instruction::AtomicRMW:
1305     Ordering = cast<AtomicRMWInst>(I)->getOrdering();
1306     break;
1307   case Instruction::Store:
1308     Ordering = cast<StoreInst>(I)->getOrdering();
1309     break;
1310   case Instruction::Load:
1311     Ordering = cast<LoadInst>(I)->getOrdering();
1312     break;
1313   default:
1314     llvm_unreachable(
1315         "New atomic operations need to be known in the attributor.");
1316   }
1317 
1318   return (Ordering != AtomicOrdering::Unordered &&
1319           Ordering != AtomicOrdering::Monotonic);
1320 }
1321 
1322 /// Return true if this intrinsic is nosync.  This is only used for intrinsics
1323 /// which would be nosync except that they have a volatile flag.  All other
1324 /// intrinsics are simply annotated with the nosync attribute in Intrinsics.td.
1325 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
1326   if (auto *MI = dyn_cast<MemIntrinsic>(I))
1327     return !MI->isVolatile();
1328   return false;
1329 }
1330 
1331 ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {
1332 
1333   auto CheckRWInstForNoSync = [&](Instruction &I) {
    // We are looking for volatile instructions or non-relaxed atomics.
1335 
1336     if (const auto *CB = dyn_cast<CallBase>(&I)) {
1337       if (CB->hasFnAttr(Attribute::NoSync))
1338         return true;
1339 
1340       if (isNoSyncIntrinsic(&I))
1341         return true;
1342 
1343       const auto &NoSyncAA = A.getAAFor<AANoSync>(
1344           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
1345       return NoSyncAA.isAssumedNoSync();
1346     }
1347 
1348     if (!I.isVolatile() && !isNonRelaxedAtomic(&I))
1349       return true;
1350 
1351     return false;
1352   };
1353 
1354   auto CheckForNoSync = [&](Instruction &I) {
1355     // At this point we handled all read/write effects and they are all
1356     // nosync, so they can be skipped.
1357     if (I.mayReadOrWriteMemory())
1358       return true;
1359 
1360     // non-convergent and readnone imply nosync.
1361     return !cast<CallBase>(I).isConvergent();
1362   };
1363 
1364   if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
1365       !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
1366     return indicatePessimisticFixpoint();
1367 
1368   return ChangeStatus::UNCHANGED;
1369 }
1370 
1371 struct AANoSyncFunction final : public AANoSyncImpl {
1372   AANoSyncFunction(const IRPosition &IRP, Attributor &A)
1373       : AANoSyncImpl(IRP, A) {}
1374 
1375   /// See AbstractAttribute::trackStatistics()
1376   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
1377 };
1378 
1379 /// NoSync attribute deduction for a call site.
1380 struct AANoSyncCallSite final : AANoSyncImpl {
1381   AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
1382       : AANoSyncImpl(IRP, A) {}
1383 
1384   /// See AbstractAttribute::initialize(...).
1385   void initialize(Attributor &A) override {
1386     AANoSyncImpl::initialize(A);
1387     Function *F = getAssociatedFunction();
1388     if (!F || F->isDeclaration())
1389       indicatePessimisticFixpoint();
1390   }
1391 
1392   /// See AbstractAttribute::updateImpl(...).
1393   ChangeStatus updateImpl(Attributor &A) override {
1394     // TODO: Once we have call site specific value information we can provide
1395     //       call site specific liveness information and then it makes
1396     //       sense to specialize attributes for call sites instead of
1397     //       redirecting requests to the callee.
1398     Function *F = getAssociatedFunction();
1399     const IRPosition &FnPos = IRPosition::function(*F);
1400     auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos, DepClassTy::REQUIRED);
1401     return clampStateAndIndicateChange(getState(), FnAA.getState());
1402   }
1403 
1404   /// See AbstractAttribute::trackStatistics()
1405   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
1406 };
1407 
1408 /// ------------------------ No-Free Attributes ----------------------------
1409 
1410 struct AANoFreeImpl : public AANoFree {
1411   AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}
1412 
1413   /// See AbstractAttribute::updateImpl(...).
1414   ChangeStatus updateImpl(Attributor &A) override {
1415     auto CheckForNoFree = [&](Instruction &I) {
1416       const auto &CB = cast<CallBase>(I);
1417       if (CB.hasFnAttr(Attribute::NoFree))
1418         return true;
1419 
1420       const auto &NoFreeAA = A.getAAFor<AANoFree>(
1421           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1422       return NoFreeAA.isAssumedNoFree();
1423     };
1424 
1425     if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
1426       return indicatePessimisticFixpoint();
1427     return ChangeStatus::UNCHANGED;
1428   }
1429 
1430   /// See AbstractAttribute::getAsStr().
1431   const std::string getAsStr() const override {
1432     return getAssumed() ? "nofree" : "may-free";
1433   }
1434 };
1435 
1436 struct AANoFreeFunction final : public AANoFreeImpl {
1437   AANoFreeFunction(const IRPosition &IRP, Attributor &A)
1438       : AANoFreeImpl(IRP, A) {}
1439 
1440   /// See AbstractAttribute::trackStatistics()
1441   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
1442 };
1443 
1444 /// NoFree attribute deduction for a call site.
1445 struct AANoFreeCallSite final : AANoFreeImpl {
1446   AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
1447       : AANoFreeImpl(IRP, A) {}
1448 
1449   /// See AbstractAttribute::initialize(...).
1450   void initialize(Attributor &A) override {
1451     AANoFreeImpl::initialize(A);
1452     Function *F = getAssociatedFunction();
1453     if (!F || F->isDeclaration())
1454       indicatePessimisticFixpoint();
1455   }
1456 
1457   /// See AbstractAttribute::updateImpl(...).
1458   ChangeStatus updateImpl(Attributor &A) override {
1459     // TODO: Once we have call site specific value information we can provide
1460     //       call site specific liveness information and then it makes
1461     //       sense to specialize attributes for call sites instead of
1462     //       redirecting requests to the callee.
1463     Function *F = getAssociatedFunction();
1464     const IRPosition &FnPos = IRPosition::function(*F);
1465     auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos, DepClassTy::REQUIRED);
1466     return clampStateAndIndicateChange(getState(), FnAA.getState());
1467   }
1468 
1469   /// See AbstractAttribute::trackStatistics()
1470   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
1471 };
1472 
1473 /// NoFree attribute for floating values.
1474 struct AANoFreeFloating : AANoFreeImpl {
1475   AANoFreeFloating(const IRPosition &IRP, Attributor &A)
1476       : AANoFreeImpl(IRP, A) {}
1477 
1478   /// See AbstractAttribute::trackStatistics()
1479   void trackStatistics() const override{STATS_DECLTRACK_FLOATING_ATTR(nofree)}
1480 
1481   /// See AbstractAttribute::updateImpl(...).
1482   ChangeStatus updateImpl(Attributor &A) override {
1483     const IRPosition &IRP = getIRPosition();
1484 
1485     const auto &NoFreeAA = A.getAAFor<AANoFree>(
1486         *this, IRPosition::function_scope(IRP), DepClassTy::OPTIONAL);
1487     if (NoFreeAA.isAssumedNoFree())
1488       return ChangeStatus::UNCHANGED;
1489 
1490     Value &AssociatedValue = getIRPosition().getAssociatedValue();
1491     auto Pred = [&](const Use &U, bool &Follow) -> bool {
1492       Instruction *UserI = cast<Instruction>(U.getUser());
1493       if (auto *CB = dyn_cast<CallBase>(UserI)) {
1494         if (CB->isBundleOperand(&U))
1495           return false;
1496         if (!CB->isArgOperand(&U))
1497           return true;
1498         unsigned ArgNo = CB->getArgOperandNo(&U);
1499 
1500         const auto &NoFreeArg = A.getAAFor<AANoFree>(
1501             *this, IRPosition::callsite_argument(*CB, ArgNo),
1502             DepClassTy::REQUIRED);
1503         return NoFreeArg.isAssumedNoFree();
1504       }
1505 
1506       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
1507           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
1508         Follow = true;
1509         return true;
1510       }
1511       if (isa<ReturnInst>(UserI))
1512         return true;
1513 
1514       // Unknown user.
1515       return false;
1516     };
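    // If any transitive use could not be classified above, conservatively
    // assume the value might be freed.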
1517     if (!A.checkForAllUses(Pred, *this, AssociatedValue))
1518       return indicatePessimisticFixpoint();
1519 
1520     return ChangeStatus::UNCHANGED;
1521   }
1522 };
1523 
1524 /// NoFree attribute for an argument.
1525 struct AANoFreeArgument final : AANoFreeFloating {
1526   AANoFreeArgument(const IRPosition &IRP, Attributor &A)
1527       : AANoFreeFloating(IRP, A) {}
1528 
1529   /// See AbstractAttribute::trackStatistics()
1530   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
1531 };
1532 
1533 /// NoFree attribute for a call site argument.
1534 struct AANoFreeCallSiteArgument final : AANoFreeFloating {
1535   AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
1536       : AANoFreeFloating(IRP, A) {}
1537 
1538   /// See AbstractAttribute::updateImpl(...).
1539   ChangeStatus updateImpl(Attributor &A) override {
1540     // TODO: Once we have call site specific value information we can provide
1541     //       call site specific liveness information and then it makes
1542     //       sense to specialize attributes for call site arguments instead of
1543     //       redirecting requests to the callee argument.
1544     Argument *Arg = getAssociatedArgument();
1545     if (!Arg)
1546       return indicatePessimisticFixpoint();
1547     const IRPosition &ArgPos = IRPosition::argument(*Arg);
1548     auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos, DepClassTy::REQUIRED);
1549     return clampStateAndIndicateChange(getState(), ArgAA.getState());
1550   }
1551 
1552   /// See AbstractAttribute::trackStatistics()
1553   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nofree)}
1554 };
1555 
1556 /// NoFree attribute for function return value.
1557 struct AANoFreeReturned final : AANoFreeFloating {
1558   AANoFreeReturned(const IRPosition &IRP, Attributor &A)
1559       : AANoFreeFloating(IRP, A) {
1560     llvm_unreachable("NoFree is not applicable to function returns!");
1561   }
1562 
1563   /// See AbstractAttribute::initialize(...).
1564   void initialize(Attributor &A) override {
1565     llvm_unreachable("NoFree is not applicable to function returns!");
1566   }
1567 
1568   /// See AbstractAttribute::updateImpl(...).
1569   ChangeStatus updateImpl(Attributor &A) override {
1570     llvm_unreachable("NoFree is not applicable to function returns!");
1571   }
1572 
1573   /// See AbstractAttribute::trackStatistics()
1574   void trackStatistics() const override {}
1575 };
1576 
1577 /// NoFree attribute deduction for a call site return value.
1578 struct AANoFreeCallSiteReturned final : AANoFreeFloating {
1579   AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
1580       : AANoFreeFloating(IRP, A) {}
1581 
1582   ChangeStatus manifest(Attributor &A) override {
1583     return ChangeStatus::UNCHANGED;
1584   }
1585   /// See AbstractAttribute::trackStatistics()
1586   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
1587 };
1588 
1589 /// ------------------------ NonNull Argument Attribute ------------------------
1590 static int64_t getKnownNonNullAndDerefBytesForUse(
1591     Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
1592     const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
1593   TrackUse = false;
1594 
1595   const Value *UseV = U->get();
1596   if (!UseV->getType()->isPointerTy())
1597     return 0;
1598 
1599   // We need to follow common pointer manipulation uses to the accesses they
1600   // feed into. We could try to be smarter and, e.g., avoid looking through
1601   // non-inbounds GEPs, but for now we do not.
1602   if (isa<CastInst>(I)) {
1603     TrackUse = true;
1604     return 0;
1605   }
1606 
1607   if (isa<GetElementPtrInst>(I)) {
1608     TrackUse = true;
1609     return 0;
1610   }
1611 
1612   Type *PtrTy = UseV->getType();
1613   const Function *F = I->getFunction();
1614   bool NullPointerIsDefined =
1615       F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
1616   const DataLayout &DL = A.getInfoCache().getDL();
1617   if (const auto *CB = dyn_cast<CallBase>(I)) {
1618     if (CB->isBundleOperand(U)) {
1619       if (RetainedKnowledge RK = getKnowledgeFromUse(
1620               U, {Attribute::NonNull, Attribute::Dereferenceable})) {
1621         IsNonNull |=
1622             (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
1623         return RK.ArgValue;
1624       }
1625       return 0;
1626     }
1627 
1628     if (CB->isCallee(U)) {
1629       IsNonNull |= !NullPointerIsDefined;
1630       return 0;
1631     }
1632 
1633     unsigned ArgNo = CB->getArgOperandNo(U);
1634     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
1635     // As long as we only use known information there is no need to track
1636     // dependences here.
1637     auto &DerefAA =
1638         A.getAAFor<AADereferenceable>(QueryingAA, IRP, DepClassTy::NONE);
1639     IsNonNull |= DerefAA.isKnownNonNull();
1640     return DerefAA.getKnownDereferenceableBytes();
1641   }
1642 
1643   int64_t Offset;
1644   const Value *Base =
1645       getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
1646   if (Base) {
1647     if (Base == &AssociatedValue &&
1648         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1649       int64_t DerefBytes =
1650           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;
1651 
1652       IsNonNull |= !NullPointerIsDefined;
1653       return std::max(int64_t(0), DerefBytes);
1654     }
1655   }
1656 
1657   // Corner case when the offset is 0.
1658   Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
1659                                               /*AllowNonInbounds*/ true);
1660   if (Base) {
1661     if (Offset == 0 && Base == &AssociatedValue &&
1662         getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
1663       int64_t DerefBytes =
1664           (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
1665       IsNonNull |= !NullPointerIsDefined;
1666       return std::max(int64_t(0), DerefBytes);
1667     }
1668   }
1669 
1670   return 0;
1671 }
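// For illustration: for `store i32 0, i32* %p`, the use of %p as the pointer
// operand yields the store size of i32, i.e., 4 dereferenceable bytes, and
// nonnull whenever null is not a defined pointer in the function.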
1672 
1673 struct AANonNullImpl : AANonNull {
1674   AANonNullImpl(const IRPosition &IRP, Attributor &A)
1675       : AANonNull(IRP, A),
1676         NullIsDefined(NullPointerIsDefined(
1677             getAnchorScope(),
1678             getAssociatedValue().getType()->getPointerAddressSpace())) {}
1679 
1680   /// See AbstractAttribute::initialize(...).
1681   void initialize(Attributor &A) override {
1682     Value &V = getAssociatedValue();
1683     if (!NullIsDefined &&
1684         hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
1685                 /* IgnoreSubsumingPositions */ false, &A)) {
1686       indicateOptimisticFixpoint();
1687       return;
1688     }
1689 
1690     if (isa<ConstantPointerNull>(V)) {
1691       indicatePessimisticFixpoint();
1692       return;
1693     }
1694 
1695     AANonNull::initialize(A);
1696 
1697     bool CanBeNull, CanBeFreed;
1698     if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull,
1699                                          CanBeFreed)) {
1700       if (!CanBeNull) {
1701         indicateOptimisticFixpoint();
1702         return;
1703       }
1704     }
1705 
1706     if (isa<GlobalValue>(&getAssociatedValue())) {
1707       indicatePessimisticFixpoint();
1708       return;
1709     }
1710 
1711     if (Instruction *CtxI = getCtxI())
1712       followUsesInMBEC(*this, A, getState(), *CtxI);
1713   }
1714 
1715   /// See followUsesInMBEC
1716   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
1717                        AANonNull::StateType &State) {
1718     bool IsNonNull = false;
1719     bool TrackUse = false;
1720     getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
1721                                        IsNonNull, TrackUse);
1722     State.setKnown(IsNonNull);
1723     return TrackUse;
1724   }
1725 
1726   /// See AbstractAttribute::getAsStr().
1727   const std::string getAsStr() const override {
1728     return getAssumed() ? "nonnull" : "may-null";
1729   }
1730 
1731   /// Flag to determine if the underlying value can be null and still allow
1732   /// valid accesses.
1733   const bool NullIsDefined;
1734 };
1735 
1736 /// NonNull attribute for a floating value.
1737 struct AANonNullFloating : public AANonNullImpl {
1738   AANonNullFloating(const IRPosition &IRP, Attributor &A)
1739       : AANonNullImpl(IRP, A) {}
1740 
1741   /// See AbstractAttribute::updateImpl(...).
1742   ChangeStatus updateImpl(Attributor &A) override {
1743     const DataLayout &DL = A.getDataLayout();
1744 
1745     DominatorTree *DT = nullptr;
1746     AssumptionCache *AC = nullptr;
1747     InformationCache &InfoCache = A.getInfoCache();
1748     if (const Function *Fn = getAnchorScope()) {
1749       DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
1750       AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
1751     }
1752 
1753     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
1754                             AANonNull::StateType &T, bool Stripped) -> bool {
1755       const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V),
1756                                              DepClassTy::REQUIRED);
1757       if (!Stripped && this == &AA) {
1758         if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
1759           T.indicatePessimisticFixpoint();
1760       } else {
1761         // Use abstract attribute information.
1762         const AANonNull::StateType &NS = AA.getState();
1763         T ^= NS;
1764       }
1765       return T.isValidState();
1766     };
1767 
1768     StateType T;
1769     if (!genericValueTraversal<AANonNull, StateType>(
1770             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
1771       return indicatePessimisticFixpoint();
1772 
1773     return clampStateAndIndicateChange(getState(), T);
1774   }
1775 
1776   /// See AbstractAttribute::trackStatistics()
1777   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1778 };
1779 
1780 /// NonNull attribute for function return value.
1781 struct AANonNullReturned final
1782     : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
1783   AANonNullReturned(const IRPosition &IRP, Attributor &A)
1784       : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}
1785 
1786   /// See AbstractAttribute::getAsStr().
1787   const std::string getAsStr() const override {
1788     return getAssumed() ? "nonnull" : "may-null";
1789   }
1790 
1791   /// See AbstractAttribute::trackStatistics()
1792   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
1793 };
1794 
1795 /// NonNull attribute for function argument.
1796 struct AANonNullArgument final
1797     : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
1798   AANonNullArgument(const IRPosition &IRP, Attributor &A)
1799       : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}
1800 
1801   /// See AbstractAttribute::trackStatistics()
1802   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
1803 };
1804 
1805 struct AANonNullCallSiteArgument final : AANonNullFloating {
1806   AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
1807       : AANonNullFloating(IRP, A) {}
1808 
1809   /// See AbstractAttribute::trackStatistics()
1810   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
1811 };
1812 
1813 /// NonNull attribute for a call site return position.
1814 struct AANonNullCallSiteReturned final
1815     : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
1816   AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
1817       : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}
1818 
1819   /// See AbstractAttribute::trackStatistics()
1820   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
1821 };
1822 
1823 /// ------------------------ No-Recurse Attributes ----------------------------
1824 
1825 struct AANoRecurseImpl : public AANoRecurse {
1826   AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}
1827 
1828   /// See AbstractAttribute::getAsStr()
1829   const std::string getAsStr() const override {
1830     return getAssumed() ? "norecurse" : "may-recurse";
1831   }
1832 };
1833 
1834 struct AANoRecurseFunction final : AANoRecurseImpl {
1835   AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
1836       : AANoRecurseImpl(IRP, A) {}
1837 
1838   /// See AbstractAttribute::initialize(...).
1839   void initialize(Attributor &A) override {
1840     AANoRecurseImpl::initialize(A);
1841     if (const Function *F = getAnchorScope())
1842       if (A.getInfoCache().getSccSize(*F) != 1)
1843         indicatePessimisticFixpoint();
1844   }
1845 
1846   /// See AbstractAttribute::updateImpl(...).
1847   ChangeStatus updateImpl(Attributor &A) override {
1848 
1849     // If all live call sites are known to be no-recurse, we are as well.
1850     auto CallSitePred = [&](AbstractCallSite ACS) {
1851       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1852           *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
1853           DepClassTy::NONE);
1854       return NoRecurseAA.isKnownNoRecurse();
1855     };
1856     bool AllCallSitesKnown;
1857     if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
1858       // If we know all call sites and all are known no-recurse, we are done.
1859       // If all known call sites, which might not be all that exist, are known
1860       // to be no-recurse, we are not done but we can continue to assume
1861       // no-recurse. If one of the call sites we have not visited will become
1862       // live, another update is triggered.
1863       if (AllCallSitesKnown)
1864         indicateOptimisticFixpoint();
1865       return ChangeStatus::UNCHANGED;
1866     }
1867 
1868     // If the above check does not hold anymore we look at the calls.
1869     auto CheckForNoRecurse = [&](Instruction &I) {
1870       const auto &CB = cast<CallBase>(I);
1871       if (CB.hasFnAttr(Attribute::NoRecurse))
1872         return true;
1873 
1874       const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
1875           *this, IRPosition::callsite_function(CB), DepClassTy::REQUIRED);
1876       if (!NoRecurseAA.isAssumedNoRecurse())
1877         return false;
1878 
1879       // A call to the function itself is recursion and prevents norecurse.
1880       if (CB.getCalledFunction() == getAnchorScope())
1881         return false;
1882 
1883       return true;
1884     };
1885 
1886     if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
1887       return indicatePessimisticFixpoint();
1888     return ChangeStatus::UNCHANGED;
1889   }
1890 
1891   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
1892 };
1893 
1894 /// NoRecurse attribute deduction for a call site.
1895 struct AANoRecurseCallSite final : AANoRecurseImpl {
1896   AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
1897       : AANoRecurseImpl(IRP, A) {}
1898 
1899   /// See AbstractAttribute::initialize(...).
1900   void initialize(Attributor &A) override {
1901     AANoRecurseImpl::initialize(A);
1902     Function *F = getAssociatedFunction();
1903     if (!F || F->isDeclaration())
1904       indicatePessimisticFixpoint();
1905   }
1906 
1907   /// See AbstractAttribute::updateImpl(...).
1908   ChangeStatus updateImpl(Attributor &A) override {
1909     // TODO: Once we have call site specific value information we can provide
1910     //       call site specific liveness information and then it makes
1911     //       sense to specialize attributes for call sites instead of
1912     //       redirecting requests to the callee.
1913     Function *F = getAssociatedFunction();
1914     const IRPosition &FnPos = IRPosition::function(*F);
1915     auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos, DepClassTy::REQUIRED);
1916     return clampStateAndIndicateChange(getState(), FnAA.getState());
1917   }
1918 
1919   /// See AbstractAttribute::trackStatistics()
1920   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
1921 };
1922 
1923 /// -------------------- Undefined-Behavior Attributes ------------------------
1924 
1925 struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
1926   AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
1927       : AAUndefinedBehavior(IRP, A) {}
1928 
1929   /// See AbstractAttribute::updateImpl(...).
1931   ChangeStatus updateImpl(Attributor &A) override {
1932     const size_t UBPrevSize = KnownUBInsts.size();
1933     const size_t NoUBPrevSize = AssumedNoUBInsts.size();
1934 
1935     auto InspectMemAccessInstForUB = [&](Instruction &I) {
1936       // Skip instructions that are already saved.
1937       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1938         return true;
1939 
1940       // If we reach here, we know we have an instruction that accesses
1941       // memory through a pointer operand, which getPointerOperand()
1942       // should return for us.
1943       const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
1944       assert(PtrOp &&
1945              "Expected pointer operand of memory accessing instruction");
1946 
1947       // Either we stopped and the appropriate action was taken,
1948       // or we got back a simplified value to continue.
1949       Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
1950       if (!SimplifiedPtrOp.hasValue())
1951         return true;
1952       const Value *PtrOpVal = SimplifiedPtrOp.getValue();
1953 
1954       // A memory access through a pointer is considered UB
1955       // only if the pointer is a constant null value.
1956       // TODO: Expand it to not only check constant values.
1957       if (!isa<ConstantPointerNull>(PtrOpVal)) {
1958         AssumedNoUBInsts.insert(&I);
1959         return true;
1960       }
1961       const Type *PtrTy = PtrOpVal->getType();
1962 
1963       // Because we only consider instructions inside functions,
1964       // assume that a parent function exists.
1965       const Function *F = I.getFunction();
1966 
1967       // A memory access using a constant null pointer is only considered UB
1968       // if the null pointer is _not_ defined for the target platform.
1969       if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
1970         AssumedNoUBInsts.insert(&I);
1971       else
1972         KnownUBInsts.insert(&I);
1973       return true;
1974     };
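    // For illustration: a `store i32 0, i32* null` in a function where the
    // null pointer is not defined ends up in KnownUBInsts and is later turned
    // into `unreachable` by manifest().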
1975 
1976     auto InspectBrInstForUB = [&](Instruction &I) {
1977       // A conditional branch instruction is considered UB if it has `undef`
1978       // condition.
1979 
1980       // Skip instructions that are already saved.
1981       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
1982         return true;
1983 
1984       // We know we have a branch instruction.
1985       auto BrInst = cast<BranchInst>(&I);
1986 
1987       // Unconditional branches are never considered UB.
1988       if (BrInst->isUnconditional())
1989         return true;
1990 
1991       // Either we stopped and the appropriate action was taken,
1992       // or we got back a simplified value to continue.
1993       Optional<Value *> SimplifiedCond =
1994           stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
1995       if (!SimplifiedCond.hasValue())
1996         return true;
1997       AssumedNoUBInsts.insert(&I);
1998       return true;
1999     };
2000 
2001     auto InspectCallSiteForUB = [&](Instruction &I) {
2002       // Check whether a callsite always causes UB or not.
2003 
2004       // Skip instructions that are already saved.
2005       if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
2006         return true;
2007 
2008       // Check nonnull and noundef argument attribute violation for each
2009       // callsite.
2010       CallBase &CB = cast<CallBase>(I);
2011       Function *Callee = CB.getCalledFunction();
2012       if (!Callee)
2013         return true;
2014       for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
2015         // If the current argument is known to be simplified to a null
2016         // pointer and the corresponding argument position is known to have
2017         // the nonnull attribute, the argument is poison. Furthermore, if the
2018         // argument is poison and the position is known to have the noundef
2019         // attribute, this callsite is considered UB.
2020         if (idx >= Callee->arg_size())
2021           break;
2022         Value *ArgVal = CB.getArgOperand(idx);
2023         if (!ArgVal)
2024           continue;
2025         // Here, we handle three cases.
2026         //   (1) Not having a value means it is dead. (We can replace the
2027         //       value with undef.)
2028         //   (2) Simplified to undef. The argument violates noundef.
2029         //   (3) Simplified to a null pointer where the position is known
2030         //       to be nonnull. The argument is poison and violates noundef.
2031         IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
2032         auto &NoUndefAA =
2033             A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2034         if (!NoUndefAA.isKnownNoUndef())
2035           continue;
2036         auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2037             *this, IRPosition::value(*ArgVal), DepClassTy::NONE);
2038         if (!ValueSimplifyAA.isKnown())
2039           continue;
2040         Optional<Value *> SimplifiedVal =
2041             ValueSimplifyAA.getAssumedSimplifiedValue(A);
2042         if (!SimplifiedVal.hasValue() ||
2043             isa<UndefValue>(*SimplifiedVal.getValue())) {
2044           KnownUBInsts.insert(&I);
2045           continue;
2046         }
2047         if (!ArgVal->getType()->isPointerTy() ||
2048             !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
2049           continue;
2050         auto &NonNullAA =
2051             A.getAAFor<AANonNull>(*this, CalleeArgumentIRP, DepClassTy::NONE);
2052         if (NonNullAA.isKnownNonNull())
2053           KnownUBInsts.insert(&I);
2054       }
2055       return true;
2056     };
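    // For illustration: passing an undef value to a noundef parameter, or a
    // known-null pointer to a nonnull noundef parameter, marks the call site
    // as known UB.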
2057 
2058     auto InspectReturnInstForUB =
2059         [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
2060           // Check if a return instruction always causes UB or not.
2061           // Note: It is guaranteed that the returned position of the anchor
2062           //       scope has noundef attribute when this is called.
2063           //       We also ensure the return position is not "assumed dead"
2064           //       because the returned value was then potentially simplified to
2065           //       `undef` in AAReturnedValues without removing the `noundef`
2066           //       attribute yet.
2067 
2068           // When the returned position has the noundef attribute, UB occurs
2069           // in the following cases.
2070           //   (1) Returned value is known to be undef.
2071           //   (2) The value is known to be a null pointer and the returned
2072           //       position has nonnull attribute (because the returned value is
2073           //       poison).
2074           bool FoundUB = false;
2075           if (isa<UndefValue>(V)) {
2076             FoundUB = true;
2077           } else {
2078             if (isa<ConstantPointerNull>(V)) {
2079               auto &NonNullAA = A.getAAFor<AANonNull>(
2080                   *this, IRPosition::returned(*getAnchorScope()),
2081                   DepClassTy::NONE);
2082               if (NonNullAA.isKnownNonNull())
2083                 FoundUB = true;
2084             }
2085           }
2086 
2087           if (FoundUB)
2088             for (ReturnInst *RI : RetInsts)
2089               KnownUBInsts.insert(RI);
2090           return true;
2091         };
2092 
2093     A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
2094                               {Instruction::Load, Instruction::Store,
2095                                Instruction::AtomicCmpXchg,
2096                                Instruction::AtomicRMW},
2097                               /* CheckBBLivenessOnly */ true);
2098     A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
2099                               /* CheckBBLivenessOnly */ true);
2100     A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);
2101 
2102     // If the returned position of the anchor scope has the noundef
2103     // attribute, check all returned instructions.
2104     if (!getAnchorScope()->getReturnType()->isVoidTy()) {
2105       const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
2106       if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
2107         auto &RetPosNoUndefAA =
2108             A.getAAFor<AANoUndef>(*this, ReturnIRP, DepClassTy::NONE);
2109         if (RetPosNoUndefAA.isKnownNoUndef())
2110           A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
2111                                                     *this);
2112       }
2113     }
2114 
2115     if (NoUBPrevSize != AssumedNoUBInsts.size() ||
2116         UBPrevSize != KnownUBInsts.size())
2117       return ChangeStatus::CHANGED;
2118     return ChangeStatus::UNCHANGED;
2119   }
2120 
2121   bool isKnownToCauseUB(Instruction *I) const override {
2122     return KnownUBInsts.count(I);
2123   }
2124 
2125   bool isAssumedToCauseUB(Instruction *I) const override {
2126     // In simple words, if an instruction is not in the set of instructions
2127     // assumed to _not_ cause UB, then it is assumed to cause UB (that
2128     // includes those in the KnownUBInsts set). The rest of the boilerplate
2129     // below ensures that the instruction is one of the kinds we test for
2130     // UB.
2131 
2132     switch (I->getOpcode()) {
2133     case Instruction::Load:
2134     case Instruction::Store:
2135     case Instruction::AtomicCmpXchg:
2136     case Instruction::AtomicRMW:
2137       return !AssumedNoUBInsts.count(I);
2138     case Instruction::Br: {
2139       auto BrInst = cast<BranchInst>(I);
2140       if (BrInst->isUnconditional())
2141         return false;
2142       return !AssumedNoUBInsts.count(I);
2143     } break;
2144     default:
2145       return false;
2146     }
2147     return false;
2148   }
2149 
2150   ChangeStatus manifest(Attributor &A) override {
2151     if (KnownUBInsts.empty())
2152       return ChangeStatus::UNCHANGED;
2153     for (Instruction *I : KnownUBInsts)
2154       A.changeToUnreachableAfterManifest(I);
2155     return ChangeStatus::CHANGED;
2156   }
2157 
2158   /// See AbstractAttribute::getAsStr()
2159   const std::string getAsStr() const override {
2160     return getAssumed() ? "undefined-behavior" : "no-ub";
2161   }
2162 
2163   /// Note: The correctness of this analysis depends on the fact that the
2164   /// following 2 sets will stop changing after some point.
2165   /// "Change" here means that their size changes.
2166   /// The size of each set is monotonically increasing
2167   /// (we only add items to them) and it is upper bounded by the number of
2168   /// instructions in the processed function (we can never save more
2169   /// elements in either set than this number). Hence, at some point,
2170   /// they will stop increasing.
2171   /// Consequently, at some point, both sets will have stopped
2172   /// changing, effectively making the analysis reach a fixpoint.
2173 
2174   /// Note: These 2 sets are disjoint and an instruction can be considered
2175   /// one of 3 things:
2176   /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
2177   ///    the KnownUBInsts set.
2178   /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
2179   ///    has a reason to assume it).
2180   /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
2181   ///    could not find a reason to assume or prove that it can cause UB,
2182   ///    hence it assumes it doesn't. We have a set for these instructions
2183   ///    so that we don't reprocess them in every update.
2184   ///    Note however that instructions in this set may cause UB.
2185 
2186 protected:
2187   /// A set of all live instructions _known_ to cause UB.
2188   SmallPtrSet<Instruction *, 8> KnownUBInsts;
2189 
2190 private:
2191   /// A set of all the (live) instructions that are assumed to _not_ cause UB.
2192   SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;
2193 
2194   // Should be called during updates when we are processing an instruction
2195   // \p I that depends on a value \p V; one of the following has to happen:
2196   // - If the value is assumed, then stop.
2197   // - If the value is known but undef, then consider it UB.
2198   // - Otherwise, do specific processing with the simplified value.
2199   // We return None in the first 2 cases to signify that an appropriate
2200   // action was taken and the caller should stop.
2201   // Otherwise, we return the simplified value that the caller should
2202   // use for specific processing.
2203   Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
2204                                          Instruction *I) {
2205     const auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
2206         *this, IRPosition::value(*V), DepClassTy::REQUIRED);
2207     Optional<Value *> SimplifiedV =
2208         ValueSimplifyAA.getAssumedSimplifiedValue(A);
2209     if (!ValueSimplifyAA.isKnown()) {
2210       // Don't depend on assumed values.
2211       return llvm::None;
2212     }
2213     if (!SimplifiedV.hasValue()) {
2214       // If it is known (which we tested above) but it doesn't have a value,
2215       // then we can assume `undef` and hence the instruction is UB.
2216       KnownUBInsts.insert(I);
2217       return llvm::None;
2218     }
2219     Value *Val = SimplifiedV.getValue();
2220     if (isa<UndefValue>(Val)) {
2221       KnownUBInsts.insert(I);
2222       return llvm::None;
2223     }
2224     return Val;
2225   }
2226 };
2227 
2228 struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
2229   AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
2230       : AAUndefinedBehaviorImpl(IRP, A) {}
2231 
2232   /// See AbstractAttribute::trackStatistics()
2233   void trackStatistics() const override {
2234     STATS_DECL(UndefinedBehaviorInstruction, Instruction,
2235                "Number of instructions known to have UB");
2236     BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
2237         KnownUBInsts.size();
2238   }
2239 };
2240 
2241 /// ------------------------ Will-Return Attributes ----------------------------
2242 
2243 // Helper function that checks whether a function has any cycle that we do
2244 // not know to be bounded.
2245 // Loops with a known maximum trip count are bounded; any other cycle is not.
2246 static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
2247   ScalarEvolution *SE =
2248       A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
2249   LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
2250   // If either SCEV or LoopInfo is not available for the function then we
2251   // assume any cycle to be an unbounded cycle.
2252   // We use scc_iterator, which uses Tarjan's algorithm to find all the
2253   // maximal SCCs. To detect a cycle, we only need to find the maximal ones.
2254   if (!SE || !LI) {
2255     for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
2256       if (SCCI.hasCycle())
2257         return true;
2258     return false;
2259   }
2260 
2261   // If there's irreducible control, the function may contain non-loop cycles.
2262   if (mayContainIrreducibleControl(F, LI))
2263     return true;
2264 
2265   // Any loop that does not have a max trip count is an unbounded cycle.
2266   for (auto *L : LI->getLoopsInPreorder()) {
2267     if (!SE->getSmallConstantMaxTripCount(L))
2268       return true;
2269   }
2270   return false;
2271 }
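// For illustration: a loop with a constant maximum trip count (e.g.,
// `for (int i = 0; i != 8; ++i)`) is bounded, whereas a loop whose maximum
// trip count SCEV cannot compute, or any irreducible control flow, makes the
// function a candidate for unbounded cycles and blocks willreturn deduction.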
2272 
2273 struct AAWillReturnImpl : public AAWillReturn {
2274   AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
2275       : AAWillReturn(IRP, A) {}
2276 
2277   /// See AbstractAttribute::initialize(...).
2278   void initialize(Attributor &A) override {
2279     AAWillReturn::initialize(A);
2280 
2281     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ true)) {
2282       indicateOptimisticFixpoint();
2283       return;
2284     }
2285   }
2286 
2287   /// Check for `mustprogress` and `readonly` as they imply `willreturn`.
2288   bool isImpliedByMustprogressAndReadonly(Attributor &A, bool KnownOnly) {
2289     // Check for `mustprogress` in the scope and the associated function which
2290     // might be different if this is a call site.
2291     if ((!getAnchorScope() || !getAnchorScope()->mustProgress()) &&
2292         (!getAssociatedFunction() || !getAssociatedFunction()->mustProgress()))
2293       return false;
2294 
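    // A mustprogress function may not loop forever without observable side
    // effects; if it is also readonly it has no such effects, so it has to
    // return eventually.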
2295     const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2296                                                       DepClassTy::NONE);
2297     if (!MemAA.isAssumedReadOnly())
2298       return false;
2299     if (KnownOnly && !MemAA.isKnownReadOnly())
2300       return false;
2301     if (!MemAA.isKnownReadOnly())
2302       A.recordDependence(MemAA, *this, DepClassTy::OPTIONAL);
2303 
2304     return true;
2305   }
2306 
2307   /// See AbstractAttribute::updateImpl(...).
2308   ChangeStatus updateImpl(Attributor &A) override {
2309     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2310       return ChangeStatus::UNCHANGED;
2311 
2312     auto CheckForWillReturn = [&](Instruction &I) {
2313       IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
2314       const auto &WillReturnAA =
2315           A.getAAFor<AAWillReturn>(*this, IPos, DepClassTy::REQUIRED);
2316       if (WillReturnAA.isKnownWillReturn())
2317         return true;
2318       if (!WillReturnAA.isAssumedWillReturn())
2319         return false;
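      // The callee is only assumed, not known, to return; it could still be
      // part of a recursive cycle through this function, so additionally
      // require norecurse at the call site.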
2320       const auto &NoRecurseAA =
2321           A.getAAFor<AANoRecurse>(*this, IPos, DepClassTy::REQUIRED);
2322       return NoRecurseAA.isAssumedNoRecurse();
2323     };
2324 
2325     if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
2326       return indicatePessimisticFixpoint();
2327 
2328     return ChangeStatus::UNCHANGED;
2329   }
2330 
2331   /// See AbstractAttribute::getAsStr()
2332   const std::string getAsStr() const override {
2333     return getAssumed() ? "willreturn" : "may-noreturn";
2334   }
2335 };
2336 
2337 struct AAWillReturnFunction final : AAWillReturnImpl {
2338   AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
2339       : AAWillReturnImpl(IRP, A) {}
2340 
2341   /// See AbstractAttribute::initialize(...).
2342   void initialize(Attributor &A) override {
2343     AAWillReturnImpl::initialize(A);
2344 
2345     Function *F = getAnchorScope();
2346     if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
2347       indicatePessimisticFixpoint();
2348   }
2349 
2350   /// See AbstractAttribute::trackStatistics()
2351   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
2352 };
2353 
2354 /// WillReturn attribute deduction for a call site.
2355 struct AAWillReturnCallSite final : AAWillReturnImpl {
2356   AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
2357       : AAWillReturnImpl(IRP, A) {}
2358 
2359   /// See AbstractAttribute::initialize(...).
2360   void initialize(Attributor &A) override {
2361     AAWillReturnImpl::initialize(A);
2362     Function *F = getAssociatedFunction();
2363     if (!F || !A.isFunctionIPOAmendable(*F))
2364       indicatePessimisticFixpoint();
2365   }
2366 
2367   /// See AbstractAttribute::updateImpl(...).
2368   ChangeStatus updateImpl(Attributor &A) override {
2369     if (isImpliedByMustprogressAndReadonly(A, /* KnownOnly */ false))
2370       return ChangeStatus::UNCHANGED;
2371 
2372     // TODO: Once we have call site specific value information we can provide
2373     //       call site specific liveness information and then it makes
2374     //       sense to specialize attributes for call sites instead of
2375     //       redirecting requests to the callee.
2376     Function *F = getAssociatedFunction();
2377     const IRPosition &FnPos = IRPosition::function(*F);
2378     auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos, DepClassTy::REQUIRED);
2379     return clampStateAndIndicateChange(getState(), FnAA.getState());
2380   }
2381 
2382   /// See AbstractAttribute::trackStatistics()
2383   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
2384 };
2385 
2386 /// -------------------AAReachability Attribute--------------------------
2387 
2388 struct AAReachabilityImpl : AAReachability {
2389   AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
2390       : AAReachability(IRP, A) {}
2391 
2392   const std::string getAsStr() const override {
2393     // TODO: Return the number of reachable queries.
2394     return "reachable";
2395   }
2396 
2397   /// See AbstractAttribute::initialize(...).
2398   void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }
2399 
2400   /// See AbstractAttribute::updateImpl(...).
2401   ChangeStatus updateImpl(Attributor &A) override {
2402     return indicatePessimisticFixpoint();
2403   }
2404 };
2405 
2406 struct AAReachabilityFunction final : public AAReachabilityImpl {
2407   AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
2408       : AAReachabilityImpl(IRP, A) {}
2409 
2410   /// See AbstractAttribute::trackStatistics()
2411   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
2412 };
2413 
2414 /// ------------------------ NoAlias Argument Attribute ------------------------
2415 
2416 struct AANoAliasImpl : AANoAlias {
2417   AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
2418     assert(getAssociatedType()->isPointerTy() &&
2419            "Noalias is a pointer attribute");
2420   }
2421 
2422   const std::string getAsStr() const override {
2423     return getAssumed() ? "noalias" : "may-alias";
2424   }
2425 };
2426 
2427 /// NoAlias attribute for a floating value.
2428 struct AANoAliasFloating final : AANoAliasImpl {
2429   AANoAliasFloating(const IRPosition &IRP, Attributor &A)
2430       : AANoAliasImpl(IRP, A) {}
2431 
2432   /// See AbstractAttribute::initialize(...).
2433   void initialize(Attributor &A) override {
2434     AANoAliasImpl::initialize(A);
2435     Value *Val = &getAssociatedValue();
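    // Walk up through casts whose operand has no other users to find the
    // underlying value.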
2436     do {
2437       CastInst *CI = dyn_cast<CastInst>(Val);
2438       if (!CI)
2439         break;
2440       Value *Base = CI->getOperand(0);
2441       if (!Base->hasOneUse())
2442         break;
2443       Val = Base;
2444     } while (true);
2445 
2446     if (!Val->getType()->isPointerTy()) {
2447       indicatePessimisticFixpoint();
2448       return;
2449     }
2450 
2451     if (isa<AllocaInst>(Val))
2452       indicateOptimisticFixpoint();
2453     else if (isa<ConstantPointerNull>(Val) &&
2454              !NullPointerIsDefined(getAnchorScope(),
2455                                    Val->getType()->getPointerAddressSpace()))
2456       indicateOptimisticFixpoint();
2457     else if (Val != &getAssociatedValue()) {
2458       const auto &ValNoAliasAA = A.getAAFor<AANoAlias>(
2459           *this, IRPosition::value(*Val), DepClassTy::OPTIONAL);
2460       if (ValNoAliasAA.isKnownNoAlias())
2461         indicateOptimisticFixpoint();
2462     }
2463   }
2464 
2465   /// See AbstractAttribute::updateImpl(...).
2466   ChangeStatus updateImpl(Attributor &A) override {
2467     // TODO: Implement this.
2468     return indicatePessimisticFixpoint();
2469   }
2470 
2471   /// See AbstractAttribute::trackStatistics()
2472   void trackStatistics() const override {
2473     STATS_DECLTRACK_FLOATING_ATTR(noalias)
2474   }
2475 };
2476 
2477 /// NoAlias attribute for an argument.
2478 struct AANoAliasArgument final
2479     : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
2480   using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
2481   AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
2482 
2483   /// See AbstractAttribute::initialize(...).
2484   void initialize(Attributor &A) override {
2485     Base::initialize(A);
2486     // See callsite argument attribute and callee argument attribute.
2487     if (hasAttr({Attribute::ByVal}))
2488       indicateOptimisticFixpoint();
2489   }
2490 
2491   /// See AbstractAttribute::update(...).
2492   ChangeStatus updateImpl(Attributor &A) override {
2493     // We have to make sure no-alias on the argument does not break
2494     // synchronization when this is a callback argument, see also [1] below.
2495     // If synchronization cannot be affected, we delegate to the base updateImpl
2496     // function, otherwise we give up for now.
2497 
2498     // If the function is no-sync, no-alias cannot break synchronization.
2499     const auto &NoSyncAA =
2500         A.getAAFor<AANoSync>(*this, IRPosition::function_scope(getIRPosition()),
2501                              DepClassTy::OPTIONAL);
2502     if (NoSyncAA.isAssumedNoSync())
2503       return Base::updateImpl(A);
2504 
2505     // If the argument is read-only, no-alias cannot break synchronization.
2506     const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2507         *this, getIRPosition(), DepClassTy::OPTIONAL);
2508     if (MemBehaviorAA.isAssumedReadOnly())
2509       return Base::updateImpl(A);
2510 
2511     // If the argument is never passed through callbacks, no-alias cannot break
2512     // synchronization.
2513     bool AllCallSitesKnown;
2514     if (A.checkForAllCallSites(
2515             [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
2516             true, AllCallSitesKnown))
2517       return Base::updateImpl(A);
2518 
2519     // TODO: add no-alias but make sure it doesn't break synchronization by
2520     // introducing fake uses. See:
2521     // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
2522     //     International Workshop on OpenMP 2018,
2523     //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf
2524 
2525     return indicatePessimisticFixpoint();
2526   }
2527 
2528   /// See AbstractAttribute::trackStatistics()
2529   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
2530 };
2531 
2532 struct AANoAliasCallSiteArgument final : AANoAliasImpl {
2533   AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
2534       : AANoAliasImpl(IRP, A) {}
2535 
2536   /// See AbstractAttribute::initialize(...).
2537   void initialize(Attributor &A) override {
2538     // See callsite argument attribute and callee argument attribute.
2539     const auto &CB = cast<CallBase>(getAnchorValue());
2540     if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
2541       indicateOptimisticFixpoint();
2542     Value &Val = getAssociatedValue();
2543     if (isa<ConstantPointerNull>(Val) &&
2544         !NullPointerIsDefined(getAnchorScope(),
2545                               Val.getType()->getPointerAddressSpace()))
2546       indicateOptimisticFixpoint();
2547   }
2548 
2549   /// Determine if the underlying value may alias with the call site argument
2550   /// \p OtherArgNo of \p CB (= the underlying call site).
2551   bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
2552                             const AAMemoryBehavior &MemBehaviorAA,
2553                             const CallBase &CB, unsigned OtherArgNo) {
2554     // We do not need to worry about aliasing with the underlying IRP.
2555     if (this->getCalleeArgNo() == (int)OtherArgNo)
2556       return false;
2557 
2558     // If it is not a pointer or pointer vector we do not alias.
2559     const Value *ArgOp = CB.getArgOperand(OtherArgNo);
2560     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
2561       return false;
2562 
2563     auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
2564         *this, IRPosition::callsite_argument(CB, OtherArgNo), DepClassTy::NONE);
2565 
2566     // If the argument is readnone, there is no read-write aliasing.
2567     if (CBArgMemBehaviorAA.isAssumedReadNone()) {
2568       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2569       return false;
2570     }
2571 
2572     // If the argument is readonly and the underlying value is readonly, there
2573     // is no read-write aliasing.
2574     bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
2575     if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
2576       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2577       A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
2578       return false;
2579     }
2580 
2581     // We have to utilize actual alias analysis queries so we need the object.
2582     if (!AAR)
2583       AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());
2584 
2585     // Try to rule it out at the call site.
2586     bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
2587     LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
2588                          "callsite arguments: "
2589                       << getAssociatedValue() << " " << *ArgOp << " => "
2590                       << (IsAliasing ? "" : "no-") << "alias \n");
2591 
2592     return IsAliasing;
2593   }
2594 
2595   bool
2596   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2597                                          const AAMemoryBehavior &MemBehaviorAA,
2598                                          const AANoAlias &NoAliasAA) {
2599     // We can deduce "noalias" if the following conditions hold.
2600     // (i)   Associated value is assumed to be noalias in the definition.
2601     // (ii)  Associated value is assumed to be no-capture in all the uses
2602     //       possibly executed before this callsite.
2603     // (iii) There is no other pointer argument which could alias with the
2604     //       value.
2605 
2606     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2607     if (!AssociatedValueIsNoAliasAtDef) {
2608       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2609                         << " is not no-alias at the definition\n");
2610       return false;
2611     }
2612 
2613     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2614 
2615     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2616     const Function *ScopeFn = VIRP.getAnchorScope();
2617     auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, VIRP, DepClassTy::NONE);
2618     // Check whether the value is captured in the scope using AANoCapture.
2619     //      Look at CFG and check only uses possibly executed before this
2620     //      callsite.
2621     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2622       Instruction *UserI = cast<Instruction>(U.getUser());
2623 
2624       // If UserI is the current instruction and there is a single potential
2625       // use of the value in UserI, we allow the use.
2626       // TODO: We should inspect the operands and allow those that cannot alias
2627       //       with the value.
2628       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2629         return true;
2630 
2631       if (ScopeFn) {
2632         const auto &ReachabilityAA = A.getAAFor<AAReachability>(
2633             *this, IRPosition::function(*ScopeFn), DepClassTy::OPTIONAL);
2634 
2635         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2636           return true;
2637 
2638         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2639           if (CB->isArgOperand(&U)) {
2640 
2641             unsigned ArgNo = CB->getArgOperandNo(&U);
2642 
2643             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2644                 *this, IRPosition::callsite_argument(*CB, ArgNo),
2645                 DepClassTy::OPTIONAL);
2646 
2647             if (NoCaptureAA.isAssumedNoCapture())
2648               return true;
2649           }
2650         }
2651       }
2652 
2653       // For cases that can potentially have more users.
2654       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2655           isa<SelectInst>(U)) {
2656         Follow = true;
2657         return true;
2658       }
2659 
2660       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2661       return false;
2662     };
2663 
2664     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2665       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2666         LLVM_DEBUG(
2667             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2668                    << " cannot be noalias as it is potentially captured\n");
2669         return false;
2670       }
2671     }
2672     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2673 
2674     // Check there is no other pointer argument which could alias with the
2675     // value passed at this call site.
2676     // TODO: AbstractCallSite
2677     const auto &CB = cast<CallBase>(getAnchorValue());
2678     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2679          OtherArgNo++)
2680       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2681         return false;
2682 
2683     return true;
2684   }
2685 
2686   /// See AbstractAttribute::updateImpl(...).
2687   ChangeStatus updateImpl(Attributor &A) override {
2688     // If the argument is readnone we are done as there are no accesses via the
2689     // argument.
2690     auto &MemBehaviorAA =
2691         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
2692     if (MemBehaviorAA.isAssumedReadNone()) {
2693       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2694       return ChangeStatus::UNCHANGED;
2695     }
2696 
2697     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2698     const auto &NoAliasAA =
2699         A.getAAFor<AANoAlias>(*this, VIRP, DepClassTy::NONE);
2700 
2701     AAResults *AAR = nullptr;
2702     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2703                                                NoAliasAA)) {
2704       LLVM_DEBUG(
2705           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2706       return ChangeStatus::UNCHANGED;
2707     }
2708 
2709     return indicatePessimisticFixpoint();
2710   }
2711 
2712   /// See AbstractAttribute::trackStatistics()
2713   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2714 };
2715 
2716 /// NoAlias attribute for function return value.
2717 struct AANoAliasReturned final : AANoAliasImpl {
2718   AANoAliasReturned(const IRPosition &IRP, Attributor &A)
2719       : AANoAliasImpl(IRP, A) {}
2720 
2721   /// See AbstractAttribute::initialize(...).
2722   void initialize(Attributor &A) override {
2723     AANoAliasImpl::initialize(A);
2724     Function *F = getAssociatedFunction();
2725     if (!F || F->isDeclaration())
2726       indicatePessimisticFixpoint();
2727   }
2728 
2729   /// See AbstractAttribute::updateImpl(...).
2730   ChangeStatus updateImpl(Attributor &A) override {
2731 
2732     auto CheckReturnValue = [&](Value &RV) -> bool {
2733       if (Constant *C = dyn_cast<Constant>(&RV))
2734         if (C->isNullValue() || isa<UndefValue>(C))
2735           return true;
2736 
2737       // For now, we can only deduce noalias if we have call sites.
2738       // FIXME: add more support.
2739       if (!isa<CallBase>(&RV))
2740         return false;
2741 
2742       const IRPosition &RVPos = IRPosition::value(RV);
2743       const auto &NoAliasAA =
2744           A.getAAFor<AANoAlias>(*this, RVPos, DepClassTy::REQUIRED);
2745       if (!NoAliasAA.isAssumedNoAlias())
2746         return false;
2747 
2748       const auto &NoCaptureAA =
2749           A.getAAFor<AANoCapture>(*this, RVPos, DepClassTy::REQUIRED);
2750       return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
2751     };
2752 
2753     if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
2754       return indicatePessimisticFixpoint();
2755 
2756     return ChangeStatus::UNCHANGED;
2757   }
2758 
2759   /// See AbstractAttribute::trackStatistics()
2760   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
2761 };
2762 
2763 /// NoAlias attribute deduction for a call site return value.
2764 struct AANoAliasCallSiteReturned final : AANoAliasImpl {
2765   AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
2766       : AANoAliasImpl(IRP, A) {}
2767 
2768   /// See AbstractAttribute::initialize(...).
2769   void initialize(Attributor &A) override {
2770     AANoAliasImpl::initialize(A);
2771     Function *F = getAssociatedFunction();
2772     if (!F || F->isDeclaration())
2773       indicatePessimisticFixpoint();
2774   }
2775 
2776   /// See AbstractAttribute::updateImpl(...).
2777   ChangeStatus updateImpl(Attributor &A) override {
2778     // TODO: Once we have call site specific value information we can provide
2779     //       call site specific liveness information and then it makes
2780     //       sense to specialize attributes for call site arguments instead of
2781     //       redirecting requests to the callee argument.
2782     Function *F = getAssociatedFunction();
2783     const IRPosition &FnPos = IRPosition::returned(*F);
2784     auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos, DepClassTy::REQUIRED);
2785     return clampStateAndIndicateChange(getState(), FnAA.getState());
2786   }
2787 
2788   /// See AbstractAttribute::trackStatistics()
2789   void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
2790 };
2791 
2792 /// ------------------- AAIsDead Function Attribute -----------------------
2793 
2794 struct AAIsDeadValueImpl : public AAIsDead {
2795   AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
2796 
2797   /// See AAIsDead::isAssumedDead().
2798   bool isAssumedDead() const override { return getAssumed(); }
2799 
2800   /// See AAIsDead::isKnownDead().
2801   bool isKnownDead() const override { return getKnown(); }
2802 
2803   /// See AAIsDead::isAssumedDead(BasicBlock *).
2804   bool isAssumedDead(const BasicBlock *BB) const override { return false; }
2805 
2806   /// See AAIsDead::isKnownDead(BasicBlock *).
2807   bool isKnownDead(const BasicBlock *BB) const override { return false; }
2808 
2809   /// See AAIsDead::isAssumedDead(Instruction *I).
2810   bool isAssumedDead(const Instruction *I) const override {
2811     return I == getCtxI() && isAssumedDead();
2812   }
2813 
2814   /// See AAIsDead::isKnownDead(Instruction *I).
2815   bool isKnownDead(const Instruction *I) const override {
2816     return isAssumedDead(I) && getKnown();
2817   }
2818 
2819   /// See AbstractAttribute::getAsStr().
2820   const std::string getAsStr() const override {
2821     return isAssumedDead() ? "assumed-dead" : "assumed-live";
2822   }
2823 
2824   /// Check if all uses are assumed dead.
2825   bool areAllUsesAssumedDead(Attributor &A, Value &V) {
2826     auto UsePred = [&](const Use &U, bool &Follow) { return false; };
2827     // Explicitly set the dependence class to required because we want a long
2828     // chain of N dependent instructions to be considered live as soon as one is
2829     // without going through N update cycles. This is not required for
2830     // correctness.
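    // Illustration (hypothetical): for a use chain %v0 -> %v1 -> ... -> %vN,
    // a REQUIRED dependence lets the discovery that one value is live
    // invalidate the whole chain at once instead of over N update cycles.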
2831     return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
2832   }
2833 
2834   /// Determine if \p I is assumed to be side-effect free.
2835   bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
2836     if (!I || wouldInstructionBeTriviallyDead(I))
2837       return true;
2838 
2839     auto *CB = dyn_cast<CallBase>(I);
2840     if (!CB || isa<IntrinsicInst>(CB))
2841       return false;
2842 
2843     const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
2844     const auto &NoUnwindAA =
2845         A.getAndUpdateAAFor<AANoUnwind>(*this, CallIRP, DepClassTy::NONE);
2846     if (!NoUnwindAA.isAssumedNoUnwind())
2847       return false;
2848     if (!NoUnwindAA.isKnownNoUnwind())
2849       A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);
2850 
2851     const auto &MemBehaviorAA =
2852         A.getAndUpdateAAFor<AAMemoryBehavior>(*this, CallIRP, DepClassTy::NONE);
2853     if (MemBehaviorAA.isAssumedReadOnly()) {
2854       if (!MemBehaviorAA.isKnownReadOnly())
2855         A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2856       return true;
2857     }
2858     return false;
2859   }
2860 };
2861 
2862 struct AAIsDeadFloating : public AAIsDeadValueImpl {
2863   AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
2864       : AAIsDeadValueImpl(IRP, A) {}
2865 
2866   /// See AbstractAttribute::initialize(...).
2867   void initialize(Attributor &A) override {
2868     if (isa<UndefValue>(getAssociatedValue())) {
2869       indicatePessimisticFixpoint();
2870       return;
2871     }
2872 
2873     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2874     if (!isAssumedSideEffectFree(A, I))
2875       indicatePessimisticFixpoint();
2876   }
2877 
2878   /// See AbstractAttribute::updateImpl(...).
2879   ChangeStatus updateImpl(Attributor &A) override {
2880     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
2881     if (!isAssumedSideEffectFree(A, I))
2882       return indicatePessimisticFixpoint();
2883 
2884     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
2885       return indicatePessimisticFixpoint();
2886     return ChangeStatus::UNCHANGED;
2887   }
2888 
2889   /// See AbstractAttribute::manifest(...).
2890   ChangeStatus manifest(Attributor &A) override {
2891     Value &V = getAssociatedValue();
2892     if (auto *I = dyn_cast<Instruction>(&V)) {
2893       // If we get here we know the users are all dead. We check whether
2894       // isAssumedSideEffectFree still returns true because it might no longer
2895       // hold, in which case only the users are dead but the instruction
2896       // (=call) is still needed.
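      // E.g. (illustrative), for `%x = call i32 @may_write_mem()` with %x
      // unused, the users are dead but the call itself may have side effects
      // and must not be deleted.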
2897       if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
2898         A.deleteAfterManifest(*I);
2899         return ChangeStatus::CHANGED;
2900       }
2901     }
2902     if (V.use_empty())
2903       return ChangeStatus::UNCHANGED;
2904 
2905     bool UsedAssumedInformation = false;
2906     Optional<Constant *> C =
2907         A.getAssumedConstant(V, *this, UsedAssumedInformation);
2908     if (C.hasValue() && C.getValue())
2909       return ChangeStatus::UNCHANGED;
2910 
2911     // Replace the value with undef as it is dead but keep droppable uses around
2912     // as they provide information we don't want to give up on just yet.
2913     UndefValue &UV = *UndefValue::get(V.getType());
2914     bool AnyChange =
2915         A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
2916     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
2917   }
2918 
2919   /// See AbstractAttribute::trackStatistics()
2920   void trackStatistics() const override {
2921     STATS_DECLTRACK_FLOATING_ATTR(IsDead)
2922   }
2923 };
2924 
2925 struct AAIsDeadArgument : public AAIsDeadFloating {
2926   AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
2927       : AAIsDeadFloating(IRP, A) {}
2928 
2929   /// See AbstractAttribute::initialize(...).
2930   void initialize(Attributor &A) override {
2931     if (!A.isFunctionIPOAmendable(*getAnchorScope()))
2932       indicatePessimisticFixpoint();
2933   }
2934 
2935   /// See AbstractAttribute::manifest(...).
2936   ChangeStatus manifest(Attributor &A) override {
2937     ChangeStatus Changed = AAIsDeadFloating::manifest(A);
2938     Argument &Arg = *getAssociatedArgument();
2939     if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
2940       if (A.registerFunctionSignatureRewrite(
2941               Arg, /* ReplacementTypes */ {},
2942               Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
2943               Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
2944         Arg.dropDroppableUses();
2945         return ChangeStatus::CHANGED;
2946       }
2947     return Changed;
2948   }
2949 
2950   /// See AbstractAttribute::trackStatistics()
2951   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
2952 };
2953 
2954 struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
2955   AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
2956       : AAIsDeadValueImpl(IRP, A) {}
2957 
2958   /// See AbstractAttribute::initialize(...).
2959   void initialize(Attributor &A) override {
2960     if (isa<UndefValue>(getAssociatedValue()))
2961       indicatePessimisticFixpoint();
2962   }
2963 
2964   /// See AbstractAttribute::updateImpl(...).
2965   ChangeStatus updateImpl(Attributor &A) override {
2966     // TODO: Once we have call site specific value information we can provide
2967     //       call site specific liveness information and then it makes
2968     //       sense to specialize attributes for call site arguments instead of
2969     //       redirecting requests to the callee argument.
2970     Argument *Arg = getAssociatedArgument();
2971     if (!Arg)
2972       return indicatePessimisticFixpoint();
2973     const IRPosition &ArgPos = IRPosition::argument(*Arg);
2974     auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos, DepClassTy::REQUIRED);
2975     return clampStateAndIndicateChange(getState(), ArgAA.getState());
2976   }
2977 
2978   /// See AbstractAttribute::manifest(...).
2979   ChangeStatus manifest(Attributor &A) override {
2980     CallBase &CB = cast<CallBase>(getAnchorValue());
2981     Use &U = CB.getArgOperandUse(getCallSiteArgNo());
2982     assert(!isa<UndefValue>(U.get()) &&
2983            "Expected undef values to be filtered out!");
2984     UndefValue &UV = *UndefValue::get(U->getType());
2985     if (A.changeUseAfterManifest(U, UV))
2986       return ChangeStatus::CHANGED;
2987     return ChangeStatus::UNCHANGED;
2988   }
2989 
2990   /// See AbstractAttribute::trackStatistics()
2991   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
2992 };
2993 
2994 struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
2995   AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
2996       : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}
2997 
2998   /// See AAIsDead::isAssumedDead().
2999   bool isAssumedDead() const override {
3000     return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
3001   }
3002 
3003   /// See AbstractAttribute::initialize(...).
3004   void initialize(Attributor &A) override {
3005     if (isa<UndefValue>(getAssociatedValue())) {
3006       indicatePessimisticFixpoint();
3007       return;
3008     }
3009 
3010     // We track this separately as a secondary state.
3011     IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
3012   }
3013 
3014   /// See AbstractAttribute::updateImpl(...).
3015   ChangeStatus updateImpl(Attributor &A) override {
3016     ChangeStatus Changed = ChangeStatus::UNCHANGED;
3017     if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
3018       IsAssumedSideEffectFree = false;
3019       Changed = ChangeStatus::CHANGED;
3020     }
3021 
3022     if (!areAllUsesAssumedDead(A, getAssociatedValue()))
3023       return indicatePessimisticFixpoint();
3024     return Changed;
3025   }
3026 
3027   /// See AbstractAttribute::trackStatistics()
3028   void trackStatistics() const override {
3029     if (IsAssumedSideEffectFree)
3030       STATS_DECLTRACK_CSRET_ATTR(IsDead)
3031     else
3032       STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
3033   }
3034 
3035   /// See AbstractAttribute::getAsStr().
3036   const std::string getAsStr() const override {
3037     return isAssumedDead()
3038                ? "assumed-dead"
3039                : (getAssumed() ? "assumed-dead-users" : "assumed-live");
3040   }
3041 
3042 private:
3043   bool IsAssumedSideEffectFree;
3044 };
3045 
3046 struct AAIsDeadReturned : public AAIsDeadValueImpl {
3047   AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
3048       : AAIsDeadValueImpl(IRP, A) {}
3049 
3050   /// See AbstractAttribute::updateImpl(...).
3051   ChangeStatus updateImpl(Attributor &A) override {
3052 
3053     A.checkForAllInstructions([](Instruction &) { return true; }, *this,
3054                               {Instruction::Ret});
3055 
3056     auto PredForCallSite = [&](AbstractCallSite ACS) {
3057       if (ACS.isCallbackCall() || !ACS.getInstruction())
3058         return false;
3059       return areAllUsesAssumedDead(A, *ACS.getInstruction());
3060     };
3061 
3062     bool AllCallSitesKnown;
3063     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
3064                                 AllCallSitesKnown))
3065       return indicatePessimisticFixpoint();
3066 
3067     return ChangeStatus::UNCHANGED;
3068   }
3069 
3070   /// See AbstractAttribute::manifest(...).
3071   ChangeStatus manifest(Attributor &A) override {
3072     // TODO: Rewrite the signature to return void?
3073     bool AnyChange = false;
3074     UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
3075     auto RetInstPred = [&](Instruction &I) {
3076       ReturnInst &RI = cast<ReturnInst>(I);
3077       if (!isa<UndefValue>(RI.getReturnValue()))
3078         AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
3079       return true;
3080     };
3081     A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
3082     return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
3083   }
3084 
3085   /// See AbstractAttribute::trackStatistics()
3086   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
3087 };
3088 
3089 struct AAIsDeadFunction : public AAIsDead {
3090   AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}
3091 
3092   /// See AbstractAttribute::initialize(...).
3093   void initialize(Attributor &A) override {
3094     const Function *F = getAnchorScope();
3095     if (F && !F->isDeclaration()) {
3096       // We only want to compute liveness once. If the function is not part of
3097       // the SCC, skip it.
3098       if (A.isRunOn(*const_cast<Function *>(F))) {
3099         ToBeExploredFrom.insert(&F->getEntryBlock().front());
3100         assumeLive(A, F->getEntryBlock());
3101       } else {
3102         indicatePessimisticFixpoint();
3103       }
3104     }
3105   }
3106 
3107   /// See AbstractAttribute::getAsStr().
3108   const std::string getAsStr() const override {
3109     return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
3110            std::to_string(getAnchorScope()->size()) + "][#TBEP " +
3111            std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
3112            std::to_string(KnownDeadEnds.size()) + "]";
3113   }
3114 
3115   /// See AbstractAttribute::manifest(...).
3116   ChangeStatus manifest(Attributor &A) override {
3117     assert(getState().isValidState() &&
3118            "Attempted to manifest an invalid state!");
3119 
3120     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
3121     Function &F = *getAnchorScope();
3122 
3123     if (AssumedLiveBlocks.empty()) {
3124       A.deleteAfterManifest(F);
3125       return ChangeStatus::CHANGED;
3126     }
3127 
3128     // Flag to determine if we can change an invoke to a call assuming the
3129     // callee is nounwind. This is not possible if the personality of the
3130     // function allows catching asynchronous exceptions.
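    // Illustrative IR sketch (hypothetical): for
    //   invoke void @nounwind_callee() to label %cont unwind label %lpad
    // a nounwind callee means the unwind edge is never taken, so the invoke
    // can conceptually become
    //   call void @nounwind_callee()
    //   br label %cont
    // leaving %lpad dead, unless the personality may catch asynchronous
    // exceptions.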
3131     bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
3132 
3133     KnownDeadEnds.set_union(ToBeExploredFrom);
3134     for (const Instruction *DeadEndI : KnownDeadEnds) {
3135       auto *CB = dyn_cast<CallBase>(DeadEndI);
3136       if (!CB)
3137         continue;
3138       const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
3139           *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
3140       bool MayReturn = !NoReturnAA.isAssumedNoReturn();
3141       if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
3142         continue;
3143 
3144       if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
3145         A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
3146       else
3147         A.changeToUnreachableAfterManifest(
3148             const_cast<Instruction *>(DeadEndI->getNextNode()));
3149       HasChanged = ChangeStatus::CHANGED;
3150     }
3151 
3152     STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
3153     for (BasicBlock &BB : F)
3154       if (!AssumedLiveBlocks.count(&BB)) {
3155         A.deleteAfterManifest(BB);
3156         ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
3157       }
3158 
3159     return HasChanged;
3160   }
3161 
3162   /// See AbstractAttribute::updateImpl(...).
3163   ChangeStatus updateImpl(Attributor &A) override;
3164 
3165   bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
3166     return !AssumedLiveEdges.count(std::make_pair(From, To));
3167   }
3168 
3169   /// See AbstractAttribute::trackStatistics()
3170   void trackStatistics() const override {}
3171 
3172   /// Returns true if the function is assumed dead.
3173   bool isAssumedDead() const override { return false; }
3174 
3175   /// See AAIsDead::isKnownDead().
3176   bool isKnownDead() const override { return false; }
3177 
3178   /// See AAIsDead::isAssumedDead(BasicBlock *).
3179   bool isAssumedDead(const BasicBlock *BB) const override {
3180     assert(BB->getParent() == getAnchorScope() &&
3181            "BB must be in the same anchor scope function.");
3182 
3183     if (!getAssumed())
3184       return false;
3185     return !AssumedLiveBlocks.count(BB);
3186   }
3187 
3188   /// See AAIsDead::isKnownDead(BasicBlock *).
3189   bool isKnownDead(const BasicBlock *BB) const override {
3190     return getKnown() && isAssumedDead(BB);
3191   }
3192 
3193   /// See AAIsDead::isAssumedDead(Instruction *I).
3194   bool isAssumedDead(const Instruction *I) const override {
3195     assert(I->getParent()->getParent() == getAnchorScope() &&
3196            "Instruction must be in the same anchor scope function.");
3197 
3198     if (!getAssumed())
3199       return false;
3200 
3201     // If it is not in AssumedLiveBlocks then it is for sure dead.
3202     // Otherwise, it can still be after noreturn call in a live block.
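    // E.g. (illustrative), in an assumed-live block containing
    //   call void @exit(i32 0)   ; noreturn call, a liveness barrier
    //   store i32 1, i32* %q     ; follows the barrier
    // the store is assumed dead even though its block is assumed live.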
3203     if (!AssumedLiveBlocks.count(I->getParent()))
3204       return true;
3205 
3206     // If it is not after a liveness barrier it is live.
3207     const Instruction *PrevI = I->getPrevNode();
3208     while (PrevI) {
3209       if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
3210         return true;
3211       PrevI = PrevI->getPrevNode();
3212     }
3213     return false;
3214   }
3215 
3216   /// See AAIsDead::isKnownDead(Instruction *I).
3217   bool isKnownDead(const Instruction *I) const override {
3218     return getKnown() && isAssumedDead(I);
3219   }
3220 
3221   /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
3222   /// that internal functions called from \p BB should now be looked at.
3223   bool assumeLive(Attributor &A, const BasicBlock &BB) {
3224     if (!AssumedLiveBlocks.insert(&BB).second)
3225       return false;
3226 
3227     // We assume that all of BB is (probably) live now and if there are calls to
3228     // internal functions we will assume that those are now live as well. This
3229     // is a performance optimization for blocks with calls to a lot of internal
3230     // functions. It can however cause dead functions to be treated as live.
3231     for (const Instruction &I : BB)
3232       if (const auto *CB = dyn_cast<CallBase>(&I))
3233         if (const Function *F = CB->getCalledFunction())
3234           if (F->hasLocalLinkage())
3235             A.markLiveInternalFunction(*F);
3236     return true;
3237   }
3238 
3239   /// Collection of instructions that need to be explored again, e.g., we
3240   /// did assume they do not transfer control to (one of their) successors.
3241   SmallSetVector<const Instruction *, 8> ToBeExploredFrom;
3242 
3243   /// Collection of instructions that are known to not transfer control.
3244   SmallSetVector<const Instruction *, 8> KnownDeadEnds;
3245 
3246   /// Collection of all assumed live edges
3247   DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;
3248 
3249   /// Collection of all assumed live BasicBlocks.
3250   DenseSet<const BasicBlock *> AssumedLiveBlocks;
3251 };
3252 
3253 static bool
3254 identifyAliveSuccessors(Attributor &A, const CallBase &CB,
3255                         AbstractAttribute &AA,
3256                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3257   const IRPosition &IPos = IRPosition::callsite_function(CB);
3258 
3259   const auto &NoReturnAA =
3260       A.getAndUpdateAAFor<AANoReturn>(AA, IPos, DepClassTy::OPTIONAL);
3261   if (NoReturnAA.isAssumedNoReturn())
3262     return !NoReturnAA.isKnownNoReturn();
3263   if (CB.isTerminator())
3264     AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
3265   else
3266     AliveSuccessors.push_back(CB.getNextNode());
3267   return false;
3268 }
3269 
3270 static bool
3271 identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
3272                         AbstractAttribute &AA,
3273                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3274   bool UsedAssumedInformation =
3275       identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);
3276 
3277   // First, determine if we can change an invoke to a call assuming the
3278   // callee is nounwind. This is not possible if the personality of the
3279   // function allows catching asynchronous exceptions.
3280   if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
3281     AliveSuccessors.push_back(&II.getUnwindDest()->front());
3282   } else {
3283     const IRPosition &IPos = IRPosition::callsite_function(II);
3284     const auto &AANoUnw =
3285         A.getAndUpdateAAFor<AANoUnwind>(AA, IPos, DepClassTy::OPTIONAL);
3286     if (AANoUnw.isAssumedNoUnwind()) {
3287       UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
3288     } else {
3289       AliveSuccessors.push_back(&II.getUnwindDest()->front());
3290     }
3291   }
3292   return UsedAssumedInformation;
3293 }
3294 
3295 static bool
3296 identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
3297                         AbstractAttribute &AA,
3298                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3299   bool UsedAssumedInformation = false;
3300   if (BI.getNumSuccessors() == 1) {
3301     AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3302   } else {
3303     Optional<ConstantInt *> CI = getAssumedConstantInt(
3304         A, *BI.getCondition(), AA, UsedAssumedInformation);
3305     if (!CI.hasValue()) {
3306       // No value yet, assume both edges are dead.
3307     } else if (CI.getValue()) {
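      // A constant condition selects exactly one live successor: index
      // 1 - 1 = 0 (the true edge) for a true condition and 1 - 0 = 1 (the
      // false edge) for a false one.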
3308       const BasicBlock *SuccBB =
3309           BI.getSuccessor(1 - CI.getValue()->getZExtValue());
3310       AliveSuccessors.push_back(&SuccBB->front());
3311     } else {
3312       AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
3313       AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
3314       UsedAssumedInformation = false;
3315     }
3316   }
3317   return UsedAssumedInformation;
3318 }
3319 
3320 static bool
3321 identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
3322                         AbstractAttribute &AA,
3323                         SmallVectorImpl<const Instruction *> &AliveSuccessors) {
3324   bool UsedAssumedInformation = false;
3325   Optional<ConstantInt *> CI =
3326       getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
3327   if (!CI.hasValue()) {
3328     // No value yet, assume all edges are dead.
3329   } else if (CI.getValue()) {
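    // With a known constant condition only the matching case successor, or the
    // default destination if no case matches, is considered alive.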
3330     for (auto &CaseIt : SI.cases()) {
3331       if (CaseIt.getCaseValue() == CI.getValue()) {
3332         AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
3333         return UsedAssumedInformation;
3334       }
3335     }
3336     AliveSuccessors.push_back(&SI.getDefaultDest()->front());
3337     return UsedAssumedInformation;
3338   } else {
3339     for (const BasicBlock *SuccBB : successors(SI.getParent()))
3340       AliveSuccessors.push_back(&SuccBB->front());
3341   }
3342   return UsedAssumedInformation;
3343 }
3344 
3345 ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
3346   ChangeStatus Change = ChangeStatus::UNCHANGED;
3347 
3348   LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
3349                     << getAnchorScope()->size() << "] BBs and "
3350                     << ToBeExploredFrom.size() << " exploration points and "
3351                     << KnownDeadEnds.size() << " known dead ends\n");
3352 
3353   // Copy and clear the list of instructions we need to explore from. It is
3354   // refilled with instructions the next update has to look at.
3355   SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
3356                                                ToBeExploredFrom.end());
3357   decltype(ToBeExploredFrom) NewToBeExploredFrom;
3358 
3359   SmallVector<const Instruction *, 8> AliveSuccessors;
3360   while (!Worklist.empty()) {
3361     const Instruction *I = Worklist.pop_back_val();
3362     LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");
3363 
3364     // Fast forward for uninteresting instructions. We could look for UB here
3365     // though.
3366     while (!I->isTerminator() && !isa<CallBase>(I)) {
3367       Change = ChangeStatus::CHANGED;
3368       I = I->getNextNode();
3369     }
3370 
3371     AliveSuccessors.clear();
3372 
3373     bool UsedAssumedInformation = false;
3374     switch (I->getOpcode()) {
3375     // TODO: look for (assumed) UB to backwards propagate "deadness".
3376     default:
3377       assert(I->isTerminator() &&
3378              "Expected non-terminators to be handled already!");
3379       for (const BasicBlock *SuccBB : successors(I->getParent()))
3380         AliveSuccessors.push_back(&SuccBB->front());
3381       break;
3382     case Instruction::Call:
3383       UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
3384                                                        *this, AliveSuccessors);
3385       break;
3386     case Instruction::Invoke:
3387       UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
3388                                                        *this, AliveSuccessors);
3389       break;
3390     case Instruction::Br:
3391       UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
3392                                                        *this, AliveSuccessors);
3393       break;
3394     case Instruction::Switch:
3395       UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
3396                                                        *this, AliveSuccessors);
3397       break;
3398     }
3399 
3400     if (UsedAssumedInformation) {
3401       NewToBeExploredFrom.insert(I);
3402     } else {
3403       Change = ChangeStatus::CHANGED;
3404       if (AliveSuccessors.empty() ||
3405           (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
3406         KnownDeadEnds.insert(I);
3407     }
3408 
3409     LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
3410                       << AliveSuccessors.size() << " UsedAssumedInformation: "
3411                       << UsedAssumedInformation << "\n");
3412 
3413     for (const Instruction *AliveSuccessor : AliveSuccessors) {
3414       if (!I->isTerminator()) {
3415         assert(AliveSuccessors.size() == 1 &&
3416                "Non-terminator expected to have a single successor!");
3417         Worklist.push_back(AliveSuccessor);
3418       } else {
3419         // Record the assumed live edge.
3420         AssumedLiveEdges.insert(
3421             std::make_pair(I->getParent(), AliveSuccessor->getParent()));
3422         if (assumeLive(A, *AliveSuccessor->getParent()))
3423           Worklist.push_back(AliveSuccessor);
3424       }
3425     }
3426   }
3427 
3428   ToBeExploredFrom = std::move(NewToBeExploredFrom);
3429 
3430   // If we know everything is live there is no need to query for liveness.
3431   // Instead, indicating a pessimistic fixpoint will cause the state to be
3432   // "invalid" and all queries to be answered conservatively without lookups.
3433   // To be in this state we have to (1) have finished the exploration, (2) not
3434   // have ruled any unreachable code dead, and (3) not have discovered any
3435   // non-trivial dead end.
3436   if (ToBeExploredFrom.empty() &&
3437       getAnchorScope()->size() == AssumedLiveBlocks.size() &&
3438       llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
3439         return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
3440       }))
3441     return indicatePessimisticFixpoint();
3442   return Change;
3443 }
3444 
3445 /// Liveness information for a call site.
3446 struct AAIsDeadCallSite final : AAIsDeadFunction {
3447   AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
3448       : AAIsDeadFunction(IRP, A) {}
3449 
3450   /// See AbstractAttribute::initialize(...).
3451   void initialize(Attributor &A) override {
3452     // TODO: Once we have call site specific value information we can provide
3453     //       call site specific liveness information and then it makes
3454     //       sense to specialize attributes for call sites instead of
3455     //       redirecting requests to the callee.
3456     llvm_unreachable("Abstract attributes for liveness are not "
3457                      "supported for call sites yet!");
3458   }
3459 
3460   /// See AbstractAttribute::updateImpl(...).
3461   ChangeStatus updateImpl(Attributor &A) override {
3462     return indicatePessimisticFixpoint();
3463   }
3464 
3465   /// See AbstractAttribute::trackStatistics()
3466   void trackStatistics() const override {}
3467 };
3468 
3469 /// -------------------- Dereferenceable Argument Attribute --------------------
3470 
3471 template <>
3472 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
3473                                                      const DerefState &R) {
3474   ChangeStatus CS0 =
3475       clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
3476   ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
3477   return CS0 | CS1;
3478 }
3479 
3480 struct AADereferenceableImpl : AADereferenceable {
3481   AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
3482       : AADereferenceable(IRP, A) {}
3483   using StateType = DerefState;
3484 
3485   /// See AbstractAttribute::initialize(...).
3486   void initialize(Attributor &A) override {
3487     SmallVector<Attribute, 4> Attrs;
3488     getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
3489              Attrs, /* IgnoreSubsumingPositions */ false, &A);
3490     for (const Attribute &Attr : Attrs)
3491       takeKnownDerefBytesMaximum(Attr.getValueAsInt());
3492 
3493     const IRPosition &IRP = this->getIRPosition();
3494     NonNullAA = &A.getAAFor<AANonNull>(*this, IRP, DepClassTy::NONE);
3495 
3496     bool CanBeNull, CanBeFreed;
3497     takeKnownDerefBytesMaximum(
3498         IRP.getAssociatedValue().getPointerDereferenceableBytes(
3499             A.getDataLayout(), CanBeNull, CanBeFreed));
3500 
3501     bool IsFnInterface = IRP.isFnInterfaceKind();
3502     Function *FnScope = IRP.getAnchorScope();
3503     if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
3504       indicatePessimisticFixpoint();
3505       return;
3506     }
3507 
3508     if (Instruction *CtxI = getCtxI())
3509       followUsesInMBEC(*this, A, getState(), *CtxI);
3510   }
3511 
3512   /// See AbstractAttribute::getState()
3513   /// {
3514   StateType &getState() override { return *this; }
3515   const StateType &getState() const override { return *this; }
3516   /// }
3517 
3518   /// Helper function for collecting accessed bytes in must-be-executed-context
3519   void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
3520                               DerefState &State) {
3521     const Value *UseV = U->get();
3522     if (!UseV->getType()->isPointerTy())
3523       return;
3524 
3525     Type *PtrTy = UseV->getType();
3526     const DataLayout &DL = A.getDataLayout();
3527     int64_t Offset;
3528     if (const Value *Base = getBasePointerOfAccessPointerOperand(
3529             I, Offset, DL, /*AllowNonInbounds*/ true)) {
3530       if (Base == &getAssociatedValue() &&
3531           getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
3532         uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
3533         State.addAccessedBytes(Offset, Size);
3534       }
3535     }
3536   }
3537 
3538   /// See followUsesInMBEC
3539   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3540                        AADereferenceable::StateType &State) {
3541     bool IsNonNull = false;
3542     bool TrackUse = false;
3543     int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
3544         A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
3545     LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
3546                       << " for instruction " << *I << "\n");
3547 
3548     addAccessedBytesForUse(A, U, I, State);
3549     State.takeKnownDerefBytesMaximum(DerefBytes);
3550     return TrackUse;
3551   }
3552 
3553   /// See AbstractAttribute::manifest(...).
3554   ChangeStatus manifest(Attributor &A) override {
3555     ChangeStatus Change = AADereferenceable::manifest(A);
3556     if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
3557       removeAttrs({Attribute::DereferenceableOrNull});
3558       return ChangeStatus::CHANGED;
3559     }
3560     return Change;
3561   }
3562 
3563   void getDeducedAttributes(LLVMContext &Ctx,
3564                             SmallVectorImpl<Attribute> &Attrs) const override {
3565     // TODO: Add *_globally support
3566     if (isAssumedNonNull())
3567       Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
3568           Ctx, getAssumedDereferenceableBytes()));
3569     else
3570       Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
3571           Ctx, getAssumedDereferenceableBytes()));
3572   }
3573 
3574   /// See AbstractAttribute::getAsStr().
3575   const std::string getAsStr() const override {
3576     if (!getAssumedDereferenceableBytes())
3577       return "unknown-dereferenceable";
3578     return std::string("dereferenceable") +
3579            (isAssumedNonNull() ? "" : "_or_null") +
3580            (isAssumedGlobal() ? "_globally" : "") + "<" +
3581            std::to_string(getKnownDereferenceableBytes()) + "-" +
3582            std::to_string(getAssumedDereferenceableBytes()) + ">";
3583   }
3584 };
3585 
3586 /// Dereferenceable attribute for a floating value.
3587 struct AADereferenceableFloating : AADereferenceableImpl {
3588   AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
3589       : AADereferenceableImpl(IRP, A) {}
3590 
3591   /// See AbstractAttribute::updateImpl(...).
3592   ChangeStatus updateImpl(Attributor &A) override {
3593     const DataLayout &DL = A.getDataLayout();
3594 
3595     auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
3596                             bool Stripped) -> bool {
3597       unsigned IdxWidth =
3598           DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
3599       APInt Offset(IdxWidth, 0);
3600       const Value *Base =
3601           stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);
3602 
3603       const auto &AA = A.getAAFor<AADereferenceable>(
3604           *this, IRPosition::value(*Base), DepClassTy::REQUIRED);
3605       int64_t DerefBytes = 0;
3606       if (!Stripped && this == &AA) {
3607         // Use IR information if we did not strip anything.
3608         // TODO: track globally.
3609         bool CanBeNull, CanBeFreed;
3610         DerefBytes =
3611           Base->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
3612         T.GlobalState.indicatePessimisticFixpoint();
3613       } else {
3614         const DerefState &DS = AA.getState();
3615         DerefBytes = DS.DerefBytesState.getAssumed();
3616         T.GlobalState &= DS.GlobalState;
3617       }
3618 
3619       // For now we do not try to "increase" dereferenceability due to negative
3620       // indices as we first have to come up with code to deal with loops and
3621       // for overflows of the dereferenceable bytes.
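      // Worked example (illustrative): if the base is assumed
      // dereferenceable(8) and the accumulated offset is 4, the value here is
      // dereferenceable for max(0, 8 - 4) = 4 bytes; a negative offset is
      // clamped to 0 so we never claim more bytes than the base provides.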
3622       int64_t OffsetSExt = Offset.getSExtValue();
3623       if (OffsetSExt < 0)
3624         OffsetSExt = 0;
3625 
3626       T.takeAssumedDerefBytesMinimum(
3627           std::max(int64_t(0), DerefBytes - OffsetSExt));
3628 
3629       if (this == &AA) {
3630         if (!Stripped) {
3631           // If nothing was stripped IR information is all we got.
3632           T.takeKnownDerefBytesMaximum(
3633               std::max(int64_t(0), DerefBytes - OffsetSExt));
3634           T.indicatePessimisticFixpoint();
3635         } else if (OffsetSExt > 0) {
3636           // If something was stripped but there is circular reasoning we look
3637           // at the offset. If it is positive we basically decrease the
3638           // dereferenceable bytes in a circular loop now, which will simply
3639           // drive them down to the known value in a very slow way that we
3640           // can accelerate.
3641           T.indicatePessimisticFixpoint();
3642         }
3643       }
3644 
3645       return T.isValidState();
3646     };
3647 
3648     DerefState T;
3649     if (!genericValueTraversal<AADereferenceable, DerefState>(
3650             A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
3651       return indicatePessimisticFixpoint();
3652 
3653     return clampStateAndIndicateChange(getState(), T);
3654   }
3655 
3656   /// See AbstractAttribute::trackStatistics()
3657   void trackStatistics() const override {
3658     STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
3659   }
3660 };
3661 
3662 /// Dereferenceable attribute for a return value.
3663 struct AADereferenceableReturned final
3664     : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
3665   AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
3666       : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
3667             IRP, A) {}
3668 
3669   /// See AbstractAttribute::trackStatistics()
3670   void trackStatistics() const override {
3671     STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
3672   }
3673 };
3674 
3675 /// Dereferenceable attribute for an argument
3676 struct AADereferenceableArgument final
3677     : AAArgumentFromCallSiteArguments<AADereferenceable,
3678                                       AADereferenceableImpl> {
3679   using Base =
3680       AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
3681   AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
3682       : Base(IRP, A) {}
3683 
3684   /// See AbstractAttribute::trackStatistics()
3685   void trackStatistics() const override {
3686     STATS_DECLTRACK_ARG_ATTR(dereferenceable)
3687   }
3688 };
3689 
3690 /// Dereferenceable attribute for a call site argument.
3691 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
3692   AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
3693       : AADereferenceableFloating(IRP, A) {}
3694 
3695   /// See AbstractAttribute::trackStatistics()
3696   void trackStatistics() const override {
3697     STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
3698   }
3699 };
3700 
3701 /// Dereferenceable attribute deduction for a call site return value.
3702 struct AADereferenceableCallSiteReturned final
3703     : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
3704   using Base =
3705       AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
3706   AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
3707       : Base(IRP, A) {}
3708 
3709   /// See AbstractAttribute::trackStatistics()
3710   void trackStatistics() const override {
3711     STATS_DECLTRACK_CS_ATTR(dereferenceable);
3712   }
3713 };
3714 
3715 // ------------------------ Align Argument Attribute ------------------------
3716 
3717 static unsigned getKnownAlignForUse(Attributor &A, AAAlign &QueryingAA,
3718                                     Value &AssociatedValue, const Use *U,
3719                                     const Instruction *I, bool &TrackUse) {
3720   // We need to follow common pointer manipulation uses to the accesses they
3721   // feed into.
3722   if (isa<CastInst>(I)) {
3723     // Follow all but ptr2int casts.
3724     TrackUse = !isa<PtrToIntInst>(I);
3725     return 0;
3726   }
3727   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3728     if (GEP->hasAllConstantIndices())
3729       TrackUse = true;
3730     return 0;
3731   }
3732 
3733   MaybeAlign MA;
3734   if (const auto *CB = dyn_cast<CallBase>(I)) {
3735     if (CB->isBundleOperand(U) || CB->isCallee(U))
3736       return 0;
3737 
3738     unsigned ArgNo = CB->getArgOperandNo(U);
3739     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3740     // As long as we only use known information there is no need to track
3741     // dependences here.
3742     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP, DepClassTy::NONE);
3743     MA = MaybeAlign(AlignAA.getKnownAlign());
3744   }
3745 
3746   const DataLayout &DL = A.getDataLayout();
3747   const Value *UseV = U->get();
3748   if (auto *SI = dyn_cast<StoreInst>(I)) {
3749     if (SI->getPointerOperand() == UseV)
3750       MA = SI->getAlign();
3751   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3752     if (LI->getPointerOperand() == UseV)
3753       MA = LI->getAlign();
3754   }
3755 
3756   if (!MA || *MA <= QueryingAA.getKnownAlign())
3757     return 0;
3758 
3759   unsigned Alignment = MA->value();
3760   int64_t Offset;
3761 
3762   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3763     if (Base == &AssociatedValue) {
3764       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3765       // So we can say that the maximum power of two which is a divisor of
3766       // gcd(Offset, Alignment) is an alignment.
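      // Worked example (illustrative): with Alignment = 16 and Offset = 24,
      // the address is 16 * Q + 24 = 8 * (2 * Q + 3); gcd(24, 16) = 8 and
      // PowerOf2Floor(8) = 8, so an alignment of 8 can be deduced.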
3767 
3768       uint32_t gcd =
3769           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3770       Alignment = llvm::PowerOf2Floor(gcd);
3771     }
3772   }
3773 
3774   return Alignment;
3775 }
3776 
3777 struct AAAlignImpl : AAAlign {
3778   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3779 
3780   /// See AbstractAttribute::initialize(...).
3781   void initialize(Attributor &A) override {
3782     SmallVector<Attribute, 4> Attrs;
3783     getAttrs({Attribute::Alignment}, Attrs);
3784     for (const Attribute &Attr : Attrs)
3785       takeKnownMaximum(Attr.getValueAsInt());
3786 
3787     Value &V = getAssociatedValue();
3788     // TODO: This is a HACK to avoid getPointerAlignment to introduce a ptr2int
3789     //       use of the function pointer. This was caused by D73131. We want to
3790     //       avoid this for function pointers especially because we iterate
3791     //       their uses and int2ptr is not handled. It is not a correctness
3792     //       problem though!
3793     if (!V.getType()->getPointerElementType()->isFunctionTy())
3794       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3795 
3796     if (getIRPosition().isFnInterfaceKind() &&
3797         (!getAnchorScope() ||
3798          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3799       indicatePessimisticFixpoint();
3800       return;
3801     }
3802 
3803     if (Instruction *CtxI = getCtxI())
3804       followUsesInMBEC(*this, A, getState(), *CtxI);
3805   }
3806 
3807   /// See AbstractAttribute::manifest(...).
3808   ChangeStatus manifest(Attributor &A) override {
3809     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3810 
3811     // Check for users that allow alignment annotations.
3812     Value &AssociatedValue = getAssociatedValue();
3813     for (const Use &U : AssociatedValue.uses()) {
3814       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3815         if (SI->getPointerOperand() == &AssociatedValue)
3816           if (SI->getAlignment() < getAssumedAlign()) {
3817             STATS_DECLTRACK(AAAlign, Store,
3818                             "Number of times alignment added to a store");
3819             SI->setAlignment(Align(getAssumedAlign()));
3820             LoadStoreChanged = ChangeStatus::CHANGED;
3821           }
3822       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3823         if (LI->getPointerOperand() == &AssociatedValue)
3824           if (LI->getAlignment() < getAssumedAlign()) {
3825             LI->setAlignment(Align(getAssumedAlign()));
3826             STATS_DECLTRACK(AAAlign, Load,
3827                             "Number of times alignment added to a load");
3828             LoadStoreChanged = ChangeStatus::CHANGED;
3829           }
3830       }
3831     }
3832 
3833     ChangeStatus Changed = AAAlign::manifest(A);
3834 
3835     Align InheritAlign =
3836         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3837     if (InheritAlign >= getAssumedAlign())
3838       return LoadStoreChanged;
3839     return Changed | LoadStoreChanged;
3840   }
3841 
3842   // TODO: Provide a helper to determine the implied ABI alignment and check it
3843   //       in the existing manifest method and a new one for AAAlignImpl, to
3844   //       avoid making the alignment explicit if it did not improve.
3845 
3846   /// See AbstractAttribute::getDeducedAttributes
3847   void
3848   getDeducedAttributes(LLVMContext &Ctx,
3849                        SmallVectorImpl<Attribute> &Attrs) const override {
3850     if (getAssumedAlign() > 1)
3851       Attrs.emplace_back(
3852           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3853   }
3854 
3855   /// See followUsesInMBEC
3856   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3857                        AAAlign::StateType &State) {
3858     bool TrackUse = false;
3859 
3860     unsigned int KnownAlign =
3861         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3862     State.takeKnownMaximum(KnownAlign);
3863 
3864     return TrackUse;
3865   }
3866 
3867   /// See AbstractAttribute::getAsStr().
3868   const std::string getAsStr() const override {
3869     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3870                                 "-" + std::to_string(getAssumedAlign()) + ">")
3871                              : "unknown-align";
3872   }
3873 };
3874 
3875 /// Align attribute for a floating value.
3876 struct AAAlignFloating : AAAlignImpl {
3877   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3878 
3879   /// See AbstractAttribute::updateImpl(...).
3880   ChangeStatus updateImpl(Attributor &A) override {
3881     const DataLayout &DL = A.getDataLayout();
3882 
3883     auto VisitValueCB = [&](Value &V, const Instruction *,
3884                             AAAlign::StateType &T, bool Stripped) -> bool {
3885       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V),
3886                                            DepClassTy::REQUIRED);
3887       if (!Stripped && this == &AA) {
3888         int64_t Offset;
3889         unsigned Alignment = 1;
3890         if (const Value *Base =
3891                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3892           Align PA = Base->getPointerAlignment(DL);
3893           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3894           // So we can say that the maximum power of two which is a divisor of
3895           // gcd(Offset, Alignment) is an alignment.
3896 
3897           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3898                                                uint32_t(PA.value()));
3899           Alignment = llvm::PowerOf2Floor(gcd);
3900         } else {
3901           Alignment = V.getPointerAlignment(DL).value();
3902         }
3903         // Use only IR information if we did not strip anything.
3904         T.takeKnownMaximum(Alignment);
3905         T.indicatePessimisticFixpoint();
3906       } else {
3907         // Use abstract attribute information.
3908         const AAAlign::StateType &DS = AA.getState();
3909         T ^= DS;
3910       }
3911       return T.isValidState();
3912     };
3913 
3914     StateType T;
3915     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3916                                                    VisitValueCB, getCtxI()))
3917       return indicatePessimisticFixpoint();
3918 
3919     // TODO: If we know we visited all incoming values, thus none are assumed
3920     // dead, we can take the known information from the state T.
3921     return clampStateAndIndicateChange(getState(), T);
3922   }
3923 
3924   /// See AbstractAttribute::trackStatistics()
3925   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3926 };
3927 
3928 /// Align attribute for function return value.
3929 struct AAAlignReturned final
3930     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3931   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3932   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3933 
3934   /// See AbstractAttribute::initialize(...).
3935   void initialize(Attributor &A) override {
3936     Base::initialize(A);
3937     Function *F = getAssociatedFunction();
3938     if (!F || F->isDeclaration())
3939       indicatePessimisticFixpoint();
3940   }
3941 
3942   /// See AbstractAttribute::trackStatistics()
3943   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3944 };
3945 
3946 /// Align attribute for function argument.
3947 struct AAAlignArgument final
3948     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3949   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3950   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3951 
3952   /// See AbstractAttribute::manifest(...).
3953   ChangeStatus manifest(Attributor &A) override {
3954     // If the associated argument is involved in a must-tail call we give up
3955     // because we would need to keep the argument alignments of caller and
3956     // callee in-sync. Just does not seem worth the trouble right now.
3957     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3958       return ChangeStatus::UNCHANGED;
3959     return Base::manifest(A);
3960   }
3961 
3962   /// See AbstractAttribute::trackStatistics()
3963   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3964 };
3965 
3966 struct AAAlignCallSiteArgument final : AAAlignFloating {
3967   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3968       : AAAlignFloating(IRP, A) {}
3969 
3970   /// See AbstractAttribute::manifest(...).
3971   ChangeStatus manifest(Attributor &A) override {
3972     // If the associated argument is involved in a must-tail call we give up
3973     // because we would need to keep the argument alignments of caller and
3974     // callee in-sync. Just does not seem worth the trouble right now.
3975     if (Argument *Arg = getAssociatedArgument())
3976       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3977         return ChangeStatus::UNCHANGED;
3978     ChangeStatus Changed = AAAlignImpl::manifest(A);
3979     Align InheritAlign =
3980         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3981     if (InheritAlign >= getAssumedAlign())
3982       Changed = ChangeStatus::UNCHANGED;
3983     return Changed;
3984   }
3985 
3986   /// See AbstractAttribute::updateImpl(Attributor &A).
3987   ChangeStatus updateImpl(Attributor &A) override {
3988     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3989     if (Argument *Arg = getAssociatedArgument()) {
3990       // We only take known information from the argument
3991       // so we do not need to track a dependence.
3992       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3993           *this, IRPosition::argument(*Arg), DepClassTy::NONE);
3994       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3995     }
3996     return Changed;
3997   }
3998 
3999   /// See AbstractAttribute::trackStatistics()
4000   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
4001 };
4002 
4003 /// Align attribute deduction for a call site return value.
4004 struct AAAlignCallSiteReturned final
4005     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
4006   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
4007   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
4008       : Base(IRP, A) {}
4009 
4010   /// See AbstractAttribute::initialize(...).
4011   void initialize(Attributor &A) override {
4012     Base::initialize(A);
4013     Function *F = getAssociatedFunction();
4014     if (!F || F->isDeclaration())
4015       indicatePessimisticFixpoint();
4016   }
4017 
4018   /// See AbstractAttribute::trackStatistics()
4019   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
4020 };
4021 
4022 /// ------------------ Function No-Return Attribute ----------------------------
4023 struct AANoReturnImpl : public AANoReturn {
4024   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
4025 
4026   /// See AbstractAttribute::initialize(...).
4027   void initialize(Attributor &A) override {
4028     AANoReturn::initialize(A);
4029     Function *F = getAssociatedFunction();
4030     if (!F || F->isDeclaration())
4031       indicatePessimisticFixpoint();
4032   }
4033 
4034   /// See AbstractAttribute::getAsStr().
4035   const std::string getAsStr() const override {
4036     return getAssumed() ? "noreturn" : "may-return";
4037   }
4038 
4039   /// See AbstractAttribute::updateImpl(Attributor &A).
4040   virtual ChangeStatus updateImpl(Attributor &A) override {
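    // Any (assumed-live) return instruction disproves noreturn, so the
    // predicate rejects every return it is asked about and we give up if one
    // is found.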
4041     auto CheckForNoReturn = [](Instruction &) { return false; };
4042     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4043                                    {(unsigned)Instruction::Ret}))
4044       return indicatePessimisticFixpoint();
4045     return ChangeStatus::UNCHANGED;
4046   }
4047 };
4048 
4049 struct AANoReturnFunction final : AANoReturnImpl {
4050   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4051       : AANoReturnImpl(IRP, A) {}
4052 
4053   /// See AbstractAttribute::trackStatistics()
4054   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4055 };
4056 
/// NoReturn attribute deduction for a call site.
4058 struct AANoReturnCallSite final : AANoReturnImpl {
4059   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4060       : AANoReturnImpl(IRP, A) {}
4061 
4062   /// See AbstractAttribute::initialize(...).
4063   void initialize(Attributor &A) override {
4064     AANoReturnImpl::initialize(A);
4065     if (Function *F = getAssociatedFunction()) {
4066       const IRPosition &FnPos = IRPosition::function(*F);
4067       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4068       if (!FnAA.isAssumedNoReturn())
4069         indicatePessimisticFixpoint();
4070     }
4071   }
4072 
4073   /// See AbstractAttribute::updateImpl(...).
4074   ChangeStatus updateImpl(Attributor &A) override {
4075     // TODO: Once we have call site specific value information we can provide
4076     //       call site specific liveness information and then it makes
4077     //       sense to specialize attributes for call sites arguments instead of
4078     //       redirecting requests to the callee argument.
4079     Function *F = getAssociatedFunction();
4080     const IRPosition &FnPos = IRPosition::function(*F);
4081     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos, DepClassTy::REQUIRED);
4082     return clampStateAndIndicateChange(getState(), FnAA.getState());
4083   }
4084 
4085   /// See AbstractAttribute::trackStatistics()
4086   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4087 };
4088 
4089 /// ----------------------- Variable Capturing ---------------------------------
4090 
/// A class to hold the state for no-capture attributes.
4092 struct AANoCaptureImpl : public AANoCapture {
4093   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4094 
4095   /// See AbstractAttribute::initialize(...).
4096   void initialize(Attributor &A) override {
4097     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4098       indicateOptimisticFixpoint();
4099       return;
4100     }
4101     Function *AnchorScope = getAnchorScope();
4102     if (isFnInterfaceKind() &&
4103         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4104       indicatePessimisticFixpoint();
4105       return;
4106     }
4107 
4108     // You cannot "capture" null in the default address space.
4109     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4110         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4111       indicateOptimisticFixpoint();
4112       return;
4113     }
4114 
4115     const Function *F =
4116         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4117 
4118     // Check what state the associated function can actually capture.
4119     if (F)
4120       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4121     else
4122       indicatePessimisticFixpoint();
4123   }
4124 
4125   /// See AbstractAttribute::updateImpl(...).
4126   ChangeStatus updateImpl(Attributor &A) override;
4127 
  /// See AbstractAttribute::getDeducedAttributes(...).
4129   virtual void
4130   getDeducedAttributes(LLVMContext &Ctx,
4131                        SmallVectorImpl<Attribute> &Attrs) const override {
4132     if (!isAssumedNoCaptureMaybeReturned())
4133       return;
4134 
4135     if (isArgumentPosition()) {
4136       if (isAssumedNoCapture())
4137         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4138       else if (ManifestInternal)
4139         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4140     }
4141   }
4142 
  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p State
4144   /// depending on the ability of the function associated with \p IRP to capture
4145   /// state in memory and through "returning/throwing", respectively.
4146   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4147                                                    const Function &F,
4148                                                    BitIntegerState &State) {
4149     // TODO: Once we have memory behavior attributes we should use them here.
4150 
4151     // If we know we cannot communicate or write to memory, we do not care about
4152     // ptr2int anymore.
4153     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4154         F.getReturnType()->isVoidTy()) {
4155       State.addKnownBits(NO_CAPTURE);
4156       return;
4157     }
4158 
4159     // A function cannot capture state in memory if it only reads memory, it can
4160     // however return/throw state and the state might be influenced by the
4161     // pointer value, e.g., loading from a returned pointer might reveal a bit.
4162     if (F.onlyReadsMemory())
4163       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4164 
    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
4167     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4168       State.addKnownBits(NOT_CAPTURED_IN_RET);
4169 
4170     // Check existing "returned" attributes.
4171     int ArgNo = IRP.getCalleeArgNo();
4172     if (F.doesNotThrow() && ArgNo >= 0) {
4173       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4174         if (F.hasParamAttribute(u, Attribute::Returned)) {
4175           if (u == unsigned(ArgNo))
4176             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4177           else if (F.onlyReadsMemory())
4178             State.addKnownBits(NO_CAPTURE);
4179           else
4180             State.addKnownBits(NOT_CAPTURED_IN_RET);
4181           break;
4182         }
4183     }
4184   }
4185 
4186   /// See AbstractState::getAsStr().
4187   const std::string getAsStr() const override {
4188     if (isKnownNoCapture())
4189       return "known not-captured";
4190     if (isAssumedNoCapture())
4191       return "assumed not-captured";
4192     if (isKnownNoCaptureMaybeReturned())
4193       return "known not-captured-maybe-returned";
4194     if (isAssumedNoCaptureMaybeReturned())
4195       return "assumed not-captured-maybe-returned";
4196     return "assumed-captured";
4197   }
4198 };
4199 
4200 /// Attributor-aware capture tracker.
4201 struct AACaptureUseTracker final : public CaptureTracker {
4202 
4203   /// Create a capture tracker that can lookup in-flight abstract attributes
4204   /// through the Attributor \p A.
4205   ///
4206   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4207   /// search is stopped. If a use leads to a return instruction,
4208   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4209   /// If a use leads to a ptr2int which may capture the value,
4210   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4211   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4212   /// set. All values in \p PotentialCopies are later tracked as well. For every
4213   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4214   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4215   /// conservatively set to true.
4216   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4217                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4218                       SmallVectorImpl<const Value *> &PotentialCopies,
4219                       unsigned &RemainingUsesToExplore)
4220       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4221         PotentialCopies(PotentialCopies),
4222         RemainingUsesToExplore(RemainingUsesToExplore) {}
4223 
  /// Determine if \p V may be captured. *Also updates the state!*
4225   bool valueMayBeCaptured(const Value *V) {
4226     if (V->getType()->isPointerTy()) {
4227       PointerMayBeCaptured(V, this);
4228     } else {
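      // Non-pointer values (e.g., the result of a ptr2int) cannot be tracked
      // by PointerMayBeCaptured; conservatively give up.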
4229       State.indicatePessimisticFixpoint();
4230     }
4231     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4232   }
4233 
4234   /// See CaptureTracker::tooManyUses().
4235   void tooManyUses() override {
4236     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4237   }
4238 
4239   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4240     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4241       return true;
4242     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4243         NoCaptureAA, IRPosition::value(*O), DepClassTy::OPTIONAL);
4244     return DerefAA.getAssumedDereferenceableBytes();
4245   }
4246 
4247   /// See CaptureTracker::captured(...).
4248   bool captured(const Use *U) override {
4249     Instruction *UInst = cast<Instruction>(U->getUser());
4250     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4251                       << "\n");
4252 
4253     // Because we may reuse the tracker multiple times we keep track of the
4254     // number of explored uses ourselves as well.
4255     if (RemainingUsesToExplore-- == 0) {
4256       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4257       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4258                           /* Return */ true);
4259     }
4260 
4261     // Deal with ptr2int by following uses.
4262     if (isa<PtrToIntInst>(UInst)) {
4263       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4264       return valueMayBeCaptured(UInst);
4265     }
4266 
4267     // Explicitly catch return instructions.
4268     if (isa<ReturnInst>(UInst))
4269       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4270                           /* Return */ true);
4271 
4272     // For now we only use special logic for call sites. However, the tracker
4273     // itself knows about a lot of other non-capturing cases already.
4274     auto *CB = dyn_cast<CallBase>(UInst);
4275     if (!CB || !CB->isArgOperand(U))
4276       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4277                           /* Return */ true);
4278 
4279     unsigned ArgNo = CB->getArgOperandNo(U);
4280     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
    // If we have an abstract no-capture attribute for the argument we can use
4282     // it to justify a non-capture attribute here. This allows recursion!
4283     auto &ArgNoCaptureAA =
4284         A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos, DepClassTy::REQUIRED);
4285     if (ArgNoCaptureAA.isAssumedNoCapture())
4286       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4287                           /* Return */ false);
4288     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4289       addPotentialCopy(*CB);
4290       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4291                           /* Return */ false);
4292     }
4293 
    // Lastly, we could not find a reason why no-capture can be assumed, so we
    // do not assume it.
4295     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4296                         /* Return */ true);
4297   }
4298 
  /// Register \p CB as a potential copy of the value we are checking.
4300   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4301 
4302   /// See CaptureTracker::shouldExplore(...).
4303   bool shouldExplore(const Use *U) override {
4304     // Check liveness and ignore droppable users.
4305     return !U->getUser()->isDroppable() &&
4306            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4307   }
4308 
4309   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4310   /// \p CapturedInRet, then return the appropriate value for use in the
4311   /// CaptureTracker::captured() interface.
4312   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4313                     bool CapturedInRet) {
4314     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4315                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4316     if (CapturedInMem)
4317       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4318     if (CapturedInInt)
4319       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4320     if (CapturedInRet)
4321       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4322     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4323   }
4324 
4325 private:
4326   /// The attributor providing in-flight abstract attributes.
4327   Attributor &A;
4328 
4329   /// The abstract attribute currently updated.
4330   AANoCapture &NoCaptureAA;
4331 
4332   /// The abstract liveness state.
4333   const AAIsDead &IsDeadAA;
4334 
4335   /// The state currently updated.
4336   AANoCapture::StateType &State;
4337 
4338   /// Set of potential copies of the tracked value.
4339   SmallVectorImpl<const Value *> &PotentialCopies;
4340 
4341   /// Global counter to limit the number of explored uses.
4342   unsigned &RemainingUsesToExplore;
4343 };
4344 
4345 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4346   const IRPosition &IRP = getIRPosition();
4347   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4348                                         : &IRP.getAssociatedValue();
4349   if (!V)
4350     return indicatePessimisticFixpoint();
4351 
4352   const Function *F =
4353       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4354   assert(F && "Expected a function!");
4355   const IRPosition &FnPos = IRPosition::function(*F);
4356   const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, FnPos, DepClassTy::NONE);
4357 
4358   AANoCapture::StateType T;
4359 
4360   // Readonly means we cannot capture through memory.
4361   const auto &FnMemAA =
4362       A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::NONE);
4363   if (FnMemAA.isAssumedReadOnly()) {
4364     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4365     if (FnMemAA.isKnownReadOnly())
4366       addKnownBits(NOT_CAPTURED_IN_MEM);
4367     else
4368       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4369   }
4370 
  // Make sure all returned values are different from the underlying value.
4372   // TODO: we could do this in a more sophisticated way inside
4373   //       AAReturnedValues, e.g., track all values that escape through returns
4374   //       directly somehow.
4375   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4376     bool SeenConstant = false;
4377     for (auto &It : RVAA.returned_values()) {
4378       if (isa<Constant>(It.first)) {
4379         if (SeenConstant)
4380           return false;
4381         SeenConstant = true;
4382       } else if (!isa<Argument>(It.first) ||
4383                  It.first == getAssociatedArgument())
4384         return false;
4385     }
4386     return true;
4387   };
4388 
4389   const auto &NoUnwindAA =
4390       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::OPTIONAL);
4391   if (NoUnwindAA.isAssumedNoUnwind()) {
4392     bool IsVoidTy = F->getReturnType()->isVoidTy();
    const AAReturnedValues *RVAA =
        IsVoidTy ? nullptr
                 : &A.getAAFor<AAReturnedValues>(*this, FnPos,
                                                 DepClassTy::OPTIONAL);
4398     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4399       T.addKnownBits(NOT_CAPTURED_IN_RET);
4400       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4401         return ChangeStatus::UNCHANGED;
4402       if (NoUnwindAA.isKnownNoUnwind() &&
4403           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4404         addKnownBits(NOT_CAPTURED_IN_RET);
4405         if (isKnown(NOT_CAPTURED_IN_MEM))
4406           return indicateOptimisticFixpoint();
4407       }
4408     }
4409   }
4410 
4411   // Use the CaptureTracker interface and logic with the specialized tracker,
4412   // defined in AACaptureUseTracker, that can look at in-flight abstract
4413   // attributes and directly updates the assumed state.
4414   SmallVector<const Value *, 4> PotentialCopies;
4415   unsigned RemainingUsesToExplore =
4416       getDefaultMaxUsesToExploreForCaptureTracking();
4417   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4418                               RemainingUsesToExplore);
4419 
4420   // Check all potential copies of the associated value until we can assume
4421   // none will be captured or we have to assume at least one might be.
4422   unsigned Idx = 0;
4423   PotentialCopies.push_back(V);
4424   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4425     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4426 
4427   AANoCapture::StateType &S = getState();
4428   auto Assumed = S.getAssumed();
4429   S.intersectAssumedBits(T.getAssumed());
4430   if (!isAssumedNoCaptureMaybeReturned())
4431     return indicatePessimisticFixpoint();
4432   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4433                                    : ChangeStatus::CHANGED;
4434 }
4435 
4436 /// NoCapture attribute for function arguments.
4437 struct AANoCaptureArgument final : AANoCaptureImpl {
4438   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4439       : AANoCaptureImpl(IRP, A) {}
4440 
4441   /// See AbstractAttribute::trackStatistics()
4442   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4443 };
4444 
4445 /// NoCapture attribute for call site arguments.
4446 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4447   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4448       : AANoCaptureImpl(IRP, A) {}
4449 
4450   /// See AbstractAttribute::initialize(...).
4451   void initialize(Attributor &A) override {
4452     if (Argument *Arg = getAssociatedArgument())
4453       if (Arg->hasByValAttr())
4454         indicateOptimisticFixpoint();
4455     AANoCaptureImpl::initialize(A);
4456   }
4457 
4458   /// See AbstractAttribute::updateImpl(...).
4459   ChangeStatus updateImpl(Attributor &A) override {
4460     // TODO: Once we have call site specific value information we can provide
4461     //       call site specific liveness information and then it makes
4462     //       sense to specialize attributes for call sites arguments instead of
4463     //       redirecting requests to the callee argument.
4464     Argument *Arg = getAssociatedArgument();
4465     if (!Arg)
4466       return indicatePessimisticFixpoint();
4467     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4468     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos, DepClassTy::REQUIRED);
4469     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4470   }
4471 
4472   /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(nocapture)
  }
4474 };
4475 
4476 /// NoCapture attribute for floating values.
4477 struct AANoCaptureFloating final : AANoCaptureImpl {
4478   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4479       : AANoCaptureImpl(IRP, A) {}
4480 
4481   /// See AbstractAttribute::trackStatistics()
4482   void trackStatistics() const override {
4483     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4484   }
4485 };
4486 
4487 /// NoCapture attribute for function return value.
4488 struct AANoCaptureReturned final : AANoCaptureImpl {
4489   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4490       : AANoCaptureImpl(IRP, A) {
4491     llvm_unreachable("NoCapture is not applicable to function returns!");
4492   }
4493 
4494   /// See AbstractAttribute::initialize(...).
4495   void initialize(Attributor &A) override {
4496     llvm_unreachable("NoCapture is not applicable to function returns!");
4497   }
4498 
4499   /// See AbstractAttribute::updateImpl(...).
4500   ChangeStatus updateImpl(Attributor &A) override {
4501     llvm_unreachable("NoCapture is not applicable to function returns!");
4502   }
4503 
4504   /// See AbstractAttribute::trackStatistics()
4505   void trackStatistics() const override {}
4506 };
4507 
4508 /// NoCapture attribute deduction for a call site return value.
4509 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4510   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4511       : AANoCaptureImpl(IRP, A) {}
4512 
4513   /// See AbstractAttribute::initialize(...).
4514   void initialize(Attributor &A) override {
4515     const Function *F = getAnchorScope();
4516     // Check what state the associated function can actually capture.
4517     determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4518   }
4519 
4520   /// See AbstractAttribute::trackStatistics()
4521   void trackStatistics() const override {
4522     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4523   }
4524 };
4525 
4526 /// ------------------ Value Simplify Attribute ----------------------------
4527 struct AAValueSimplifyImpl : AAValueSimplify {
4528   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4529       : AAValueSimplify(IRP, A) {}
4530 
4531   /// See AbstractAttribute::initialize(...).
4532   void initialize(Attributor &A) override {
4533     if (getAssociatedValue().getType()->isVoidTy())
4534       indicatePessimisticFixpoint();
4535   }
4536 
4537   /// See AbstractAttribute::getAsStr().
4538   const std::string getAsStr() const override {
4539     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4540                         : "not-simple";
4541   }
4542 
4543   /// See AbstractAttribute::trackStatistics()
4544   void trackStatistics() const override {}
4545 
4546   /// See AAValueSimplify::getAssumedSimplifiedValue()
4547   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4548     if (!getAssumed())
4549       return const_cast<Value *>(&getAssociatedValue());
4550     return SimplifiedAssociatedValue;
4551   }
4552 
  /// Helper function for querying AAValueSimplify and updating the candidate.
4554   /// \param QueryingValue Value trying to unify with SimplifiedValue
4555   /// \param AccumulatedSimplifiedValue Current simplification result.
4556   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4557                              Value &QueryingValue,
4558                              Optional<Value *> &AccumulatedSimplifiedValue) {
4559     // FIXME: Add a typecast support.
4560 
4561     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4562         QueryingAA, IRPosition::value(QueryingValue), DepClassTy::REQUIRED);
4563 
4564     Optional<Value *> QueryingValueSimplified =
4565         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4566 
4567     if (!QueryingValueSimplified.hasValue())
4568       return true;
4569 
4570     if (!QueryingValueSimplified.getValue())
4571       return false;
4572 
4573     Value &QueryingValueSimplifiedUnwrapped =
4574         *QueryingValueSimplified.getValue();
4575 
4576     if (AccumulatedSimplifiedValue.hasValue() &&
4577         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4578         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4579       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4580     if (AccumulatedSimplifiedValue.hasValue() &&
4581         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4582       return true;
4583 
4584     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4585                       << " is assumed to be "
4586                       << QueryingValueSimplifiedUnwrapped << "\n");
4587 
4588     AccumulatedSimplifiedValue = QueryingValueSimplified;
4589     return true;
4590   }
4591 
  /// Returns whether a candidate was found or not.
4593   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4594     if (!getAssociatedValue().getType()->isIntegerTy())
4595       return false;
4596 
4597     const auto &AA =
4598         A.getAAFor<AAType>(*this, getIRPosition(), DepClassTy::NONE);
4599 
4600     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4601 
4602     if (!COpt.hasValue()) {
4603       SimplifiedAssociatedValue = llvm::None;
4604       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4605       return true;
4606     }
4607     if (auto *C = COpt.getValue()) {
4608       SimplifiedAssociatedValue = C;
4609       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4610       return true;
4611     }
4612     return false;
4613   }
4614 
4615   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4616     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4617       return true;
4618     if (askSimplifiedValueFor<AAPotentialValues>(A))
4619       return true;
4620     return false;
4621   }
4622 
4623   /// See AbstractAttribute::manifest(...).
4624   ChangeStatus manifest(Attributor &A) override {
4625     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4626 
4627     if (SimplifiedAssociatedValue.hasValue() &&
4628         !SimplifiedAssociatedValue.getValue())
4629       return Changed;
4630 
4631     Value &V = getAssociatedValue();
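    // No candidate (None) means every use may be replaced by undef; otherwise
    // only constant candidates can be manifested here.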
4632     auto *C = SimplifiedAssociatedValue.hasValue()
4633                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4634                   : UndefValue::get(V.getType());
4635     if (C) {
4636       // We can replace the AssociatedValue with the constant.
4637       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4638         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4639                           << " :: " << *this << "\n");
4640         if (A.changeValueAfterManifest(V, *C))
4641           Changed = ChangeStatus::CHANGED;
4642       }
4643     }
4644 
4645     return Changed | AAValueSimplify::manifest(A);
4646   }
4647 
4648   /// See AbstractState::indicatePessimisticFixpoint(...).
4649   ChangeStatus indicatePessimisticFixpoint() override {
    // NOTE: In a pessimistic fixpoint the associated value is returned and
    // regarded as known. That is why `indicateOptimisticFixpoint` is called.
4652     SimplifiedAssociatedValue = &getAssociatedValue();
4653     indicateOptimisticFixpoint();
4654     return ChangeStatus::CHANGED;
4655   }
4656 
4657 protected:
  // An assumed simplified value. Initially, it is set to Optional::None, which
  // means that the value is not clear under the current assumption. In the
  // pessimistic state, getAssumedSimplifiedValue does not return this value
  // but the original associated value instead.
4662   Optional<Value *> SimplifiedAssociatedValue;
4663 };
4664 
4665 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4666   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4667       : AAValueSimplifyImpl(IRP, A) {}
4668 
4669   void initialize(Attributor &A) override {
4670     AAValueSimplifyImpl::initialize(A);
4671     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4672       indicatePessimisticFixpoint();
4673     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4674                  Attribute::StructRet, Attribute::Nest},
4675                 /* IgnoreSubsumingPositions */ true))
4676       indicatePessimisticFixpoint();
4677 
    // FIXME: This is a hack to prevent us from propagating function pointers
    // in the new pass manager CGSCC pass, as that creates call edges the
    // CallGraphUpdater cannot handle yet.
4681     Value &V = getAssociatedValue();
4682     if (V.getType()->isPointerTy() &&
4683         V.getType()->getPointerElementType()->isFunctionTy() &&
4684         !A.isModulePass())
4685       indicatePessimisticFixpoint();
4686   }
4687 
4688   /// See AbstractAttribute::updateImpl(...).
4689   ChangeStatus updateImpl(Attributor &A) override {
    // Byval is only replaceable if it is readonly; otherwise we would write
    // into the replaced value and not into the copy that byval creates
    // implicitly.
4692     Argument *Arg = getAssociatedArgument();
4693     if (Arg->hasByValAttr()) {
4694       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4695       //       there is no race by not copying a constant byval.
4696       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
4697                                                        DepClassTy::REQUIRED);
4698       if (!MemAA.isAssumedReadOnly())
4699         return indicatePessimisticFixpoint();
4700     }
4701 
4702     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4703 
4704     auto PredForCallSite = [&](AbstractCallSite ACS) {
4705       const IRPosition &ACSArgPos =
4706           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
      // Check if a corresponding argument was found or if it is not associated
      // (which can happen for callback calls).
4709       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4710         return false;
4711 
4712       // We can only propagate thread independent values through callbacks.
4713       // This is different to direct/indirect call sites because for them we
4714       // know the thread executing the caller and callee is the same. For
4715       // callbacks this is not guaranteed, thus a thread dependent value could
4716       // be different for the caller and callee, making it invalid to propagate.
4717       Value &ArgOp = ACSArgPos.getAssociatedValue();
4718       if (ACS.isCallbackCall())
4719         if (auto *C = dyn_cast<Constant>(&ArgOp))
4720           if (C->isThreadDependent())
4721             return false;
4722       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4723     };
4724 
4725     bool AllCallSitesKnown;
4726     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4727                                 AllCallSitesKnown))
4728       if (!askSimplifiedValueForOtherAAs(A))
4729         return indicatePessimisticFixpoint();
4730 
    // If a candidate was found in this update, return CHANGED.
4732     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
4735   }
4736 
4737   /// See AbstractAttribute::trackStatistics()
4738   void trackStatistics() const override {
4739     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4740   }
4741 };
4742 
4743 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4744   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4745       : AAValueSimplifyImpl(IRP, A) {}
4746 
4747   /// See AbstractAttribute::updateImpl(...).
4748   ChangeStatus updateImpl(Attributor &A) override {
4749     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4750 
4751     auto PredForReturned = [&](Value &V) {
4752       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4753     };
4754 
4755     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4756       if (!askSimplifiedValueForOtherAAs(A))
4757         return indicatePessimisticFixpoint();
4758 
    // If a candidate was found in this update, return CHANGED.
4760     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
4763   }
4764 
4765   ChangeStatus manifest(Attributor &A) override {
4766     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4767 
4768     if (SimplifiedAssociatedValue.hasValue() &&
4769         !SimplifiedAssociatedValue.getValue())
4770       return Changed;
4771 
4772     Value &V = getAssociatedValue();
4773     auto *C = SimplifiedAssociatedValue.hasValue()
4774                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4775                   : UndefValue::get(V.getType());
4776     if (C) {
4777       auto PredForReturned =
4778           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4779             // We can replace the AssociatedValue with the constant.
4780             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4781               return true;
4782 
4783             for (ReturnInst *RI : RetInsts) {
4784               if (RI->getFunction() != getAnchorScope())
4785                 continue;
4786               auto *RC = C;
4787               if (RC->getType() != RI->getReturnValue()->getType())
4788                 RC = ConstantExpr::getBitCast(RC,
4789                                               RI->getReturnValue()->getType());
4790               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4791                                 << " in " << *RI << " :: " << *this << "\n");
4792               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4793                 Changed = ChangeStatus::CHANGED;
4794             }
4795             return true;
4796           };
4797       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4798     }
4799 
4800     return Changed | AAValueSimplify::manifest(A);
4801   }
4802 
4803   /// See AbstractAttribute::trackStatistics()
4804   void trackStatistics() const override {
4805     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4806   }
4807 };
4808 
4809 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4810   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4811       : AAValueSimplifyImpl(IRP, A) {}
4812 
4813   /// See AbstractAttribute::initialize(...).
4814   void initialize(Attributor &A) override {
4815     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4816     //        Needs investigation.
4817     // AAValueSimplifyImpl::initialize(A);
4818     Value &V = getAnchorValue();
4819 
    // TODO: Handle other cases as well.
4821     if (isa<Constant>(V))
4822       indicatePessimisticFixpoint();
4823   }
4824 
4825   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4826   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4827   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4828   /// updated and \p Changed is set appropriately.
4829   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4830                               ChangeStatus &Changed) {
4831     if (!ICmp)
4832       return false;
4833     if (!ICmp->isEquality())
4834       return false;
4835 
    // This is a comparison with == or !=. We check for nullptr now.
4837     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4838     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4839     if (!Op0IsNull && !Op1IsNull)
4840       return false;
4841 
4842     LLVMContext &Ctx = ICmp->getContext();
4843     // Check for `nullptr ==/!= nullptr` first:
4844     if (Op0IsNull && Op1IsNull) {
4845       Value *NewVal = ConstantInt::get(
4846           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4847       assert(!SimplifiedAssociatedValue.hasValue() &&
4848              "Did not expect non-fixed value for constant comparison");
4849       SimplifiedAssociatedValue = NewVal;
4850       indicateOptimisticFixpoint();
4851       Changed = ChangeStatus::CHANGED;
4852       return true;
4853     }
4854 
    // What is left is the nullptr ==/!= non-nullptr case. We use AANonNull on
    // the non-nullptr operand, and if we can assume it is non-null we can
    // conclude the result of the comparison.
4858     assert((Op0IsNull || Op1IsNull) &&
4859            "Expected nullptr versus non-nullptr comparison at this point");
4860 
    // The index of the operand that is not the nullptr constant; we ask
    // AANonNull whether it is non-null below.
4862     unsigned PtrIdx = Op0IsNull;
4863     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4864         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)),
4865         DepClassTy::REQUIRED);
4866     if (!PtrNonNullAA.isAssumedNonNull())
4867       return false;
4868 
4869     // The new value depends on the predicate, true for != and false for ==.
4870     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4871                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4872 
4873     assert((!SimplifiedAssociatedValue.hasValue() ||
4874             SimplifiedAssociatedValue == NewVal) &&
4875            "Did not expect to change value for zero-comparison");
4876 
4877     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4878     SimplifiedAssociatedValue = NewVal;
4879 
4880     if (PtrNonNullAA.isKnownNonNull())
4881       indicateOptimisticFixpoint();
4882 
    Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
4884     return true;
4885   }
4886 
4887   /// See AbstractAttribute::updateImpl(...).
4888   ChangeStatus updateImpl(Attributor &A) override {
4889     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4890 
4891     ChangeStatus Changed;
4892     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4893                                Changed))
4894       return Changed;
4895 
4896     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4897                             bool Stripped) -> bool {
4898       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V),
4899                                              DepClassTy::REQUIRED);
4900       if (!Stripped && this == &AA) {
        // TODO: Look at the instruction and check recursively.
4902 
4903         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4904                           << "\n");
4905         return false;
4906       }
4907       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4908     };
4909 
4910     bool Dummy = false;
4911     if (!genericValueTraversal<AAValueSimplify, bool>(
4912             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4913             /* UseValueSimplify */ false))
4914       if (!askSimplifiedValueForOtherAAs(A))
4915         return indicatePessimisticFixpoint();
4916 
    // If a candidate was found in this update, return CHANGED.
4918 
4919     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
               ? ChangeStatus::UNCHANGED
               : ChangeStatus::CHANGED;
4922   }
4923 
4924   /// See AbstractAttribute::trackStatistics()
4925   void trackStatistics() const override {
4926     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4927   }
4928 };
4929 
4930 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4931   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4932       : AAValueSimplifyImpl(IRP, A) {}
4933 
4934   /// See AbstractAttribute::initialize(...).
4935   void initialize(Attributor &A) override {
4936     SimplifiedAssociatedValue = &getAnchorValue();
4937     indicateOptimisticFixpoint();
4938   }
  /// See AbstractAttribute::updateImpl(...).
4940   ChangeStatus updateImpl(Attributor &A) override {
4941     llvm_unreachable(
4942         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4943   }
4944   /// See AbstractAttribute::trackStatistics()
4945   void trackStatistics() const override {
4946     STATS_DECLTRACK_FN_ATTR(value_simplify)
4947   }
4948 };
4949 
4950 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
4951   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4952       : AAValueSimplifyFunction(IRP, A) {}
4953   /// See AbstractAttribute::trackStatistics()
4954   void trackStatistics() const override {
4955     STATS_DECLTRACK_CS_ATTR(value_simplify)
4956   }
4957 };
4958 
4959 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
4960   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4961       : AAValueSimplifyReturned(IRP, A) {}
4962 
4963   /// See AbstractAttribute::manifest(...).
4964   ChangeStatus manifest(Attributor &A) override {
4965     return AAValueSimplifyImpl::manifest(A);
4966   }
4967 
4968   void trackStatistics() const override {
4969     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4970   }
4971 };
4972 struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
4973   AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
4974       : AAValueSimplifyFloating(IRP, A) {}
4975 
4976   /// See AbstractAttribute::manifest(...).
4977   ChangeStatus manifest(Attributor &A) override {
4978     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4979 
4980     if (SimplifiedAssociatedValue.hasValue() &&
4981         !SimplifiedAssociatedValue.getValue())
4982       return Changed;
4983 
4984     Value &V = getAssociatedValue();
4985     auto *C = SimplifiedAssociatedValue.hasValue()
4986                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4987                   : UndefValue::get(V.getType());
4988     if (C) {
4989       Use &U = cast<CallBase>(&getAnchorValue())
4990                    ->getArgOperandUse(getCallSiteArgNo());
4991       // We can replace the AssociatedValue with the constant.
4992       if (&V != C && V.getType() == C->getType()) {
4993         if (A.changeUseAfterManifest(U, *C))
4994           Changed = ChangeStatus::CHANGED;
4995       }
4996     }
4997 
4998     return Changed | AAValueSimplify::manifest(A);
4999   }
5000 
5001   void trackStatistics() const override {
5002     STATS_DECLTRACK_CSARG_ATTR(value_simplify)
5003   }
5004 };
5005 
5006 /// ----------------------- Heap-To-Stack Conversion ---------------------------
5007 struct AAHeapToStackImpl : public AAHeapToStack {
5008   AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
5009       : AAHeapToStack(IRP, A) {}
5010 
5011   const std::string getAsStr() const override {
5012     return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
5013   }
5014 
5015   ChangeStatus manifest(Attributor &A) override {
5016     assert(getState().isValidState() &&
5017            "Attempted to manifest an invalid state!");
5018 
5019     ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
5020     Function *F = getAnchorScope();
5021     const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5022 
5023     for (Instruction *MallocCall : MallocCalls) {
5024       // This malloc cannot be replaced.
5025       if (BadMallocCalls.count(MallocCall))
5026         continue;
5027 
5028       for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
5029         LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
5030         A.deleteAfterManifest(*FreeCall);
5031         HasChanged = ChangeStatus::CHANGED;
5032       }
5033 
5034       LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
5035                         << "\n");
5036 
5037       Align Alignment;
5038       Value *Size;
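      // Determine the allocation size (and, for aligned_alloc, the requested
      // alignment) from the call operands.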
5039       if (isCallocLikeFn(MallocCall, TLI)) {
5040         auto *Num = MallocCall->getOperand(0);
5041         auto *SizeT = MallocCall->getOperand(1);
5042         IRBuilder<> B(MallocCall);
5043         Size = B.CreateMul(Num, SizeT, "h2s.calloc.size");
5044       } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
5045         Size = MallocCall->getOperand(1);
5046         Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
5047                                    ->getValue()
5048                                    .getZExtValue())
5049                         .valueOrOne();
5050       } else {
5051         Size = MallocCall->getOperand(0);
5052       }
5053 
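      // Place the replacement alloca right after the allocation call and cast
      // it back to the original pointer type if necessary.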
5054       unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
5055       Instruction *AI =
5056           new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
5057                          "", MallocCall->getNextNode());
5058 
5059       if (AI->getType() != MallocCall->getType())
5060         AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
5061                              AI->getNextNode());
5062 
5063       A.changeValueAfterManifest(*MallocCall, *AI);
5064 
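      // An invoke still has to transfer control to its normal destination, so
      // emit an explicit branch before the call is deleted.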
5065       if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
5066         auto *NBB = II->getNormalDest();
5067         BranchInst::Create(NBB, MallocCall->getParent());
5068         A.deleteAfterManifest(*MallocCall);
5069       } else {
5070         A.deleteAfterManifest(*MallocCall);
5071       }
5072 
5073       // Zero out the allocated memory if it was a calloc.
5074       if (isCallocLikeFn(MallocCall, TLI)) {
5075         auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
5076                                    AI->getNextNode());
5077         Value *Ops[] = {
5078             BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
5079             ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};
5080 
5081         Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
5082         Module *M = F->getParent();
5083         Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
5084         CallInst::Create(Fn, Ops, "", BI->getNextNode());
5085       }
5086       HasChanged = ChangeStatus::CHANGED;
5087     }
5088 
5089     return HasChanged;
5090   }
5091 
5092   /// Collection of all malloc calls in a function.
5093   SmallSetVector<Instruction *, 4> MallocCalls;
5094 
5095   /// Collection of malloc calls that cannot be converted.
5096   DenseSet<const Instruction *> BadMallocCalls;
5097 
5098   /// A map for each malloc call to the set of associated free calls.
5099   DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;
5100 
5101   ChangeStatus updateImpl(Attributor &A) override;
5102 };
5103 
5104 ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
5105   const Function *F = getAnchorScope();
5106   const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);
5107 
5108   MustBeExecutedContextExplorer &Explorer =
5109       A.getInfoCache().getMustBeExecutedContextExplorer();
5110 
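  // An allocation with exactly one associated free call is convertible if that
  // free is known to be executed whenever the allocation is, i.e., it lies in
  // the must-be-executed context of the allocation.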
5111   auto FreeCheck = [&](Instruction &I) {
5112     const auto &Frees = FreesForMalloc.lookup(&I);
5113     if (Frees.size() != 1)
5114       return false;
5115     Instruction *UniqueFree = *Frees.begin();
5116     return Explorer.findInContextOf(UniqueFree, I.getNextNode());
5117   };
5118 
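  // Walk the transitive uses of an allocation and clear ValidUsesOnly once a
  // use is found that may let the pointer escape or be freed in a way we
  // cannot reason about.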
5119   auto UsesCheck = [&](Instruction &I) {
5120     bool ValidUsesOnly = true;
5121     bool MustUse = true;
5122     auto Pred = [&](const Use &U, bool &Follow) -> bool {
5123       Instruction *UserI = cast<Instruction>(U.getUser());
5124       if (isa<LoadInst>(UserI))
5125         return true;
5126       if (auto *SI = dyn_cast<StoreInst>(UserI)) {
5127         if (SI->getValueOperand() == U.get()) {
5128           LLVM_DEBUG(dbgs()
5129                      << "[H2S] escaping store to memory: " << *UserI << "\n");
5130           ValidUsesOnly = false;
5131         } else {
5132           // A store into the malloc'ed memory is fine.
5133         }
5134         return true;
5135       }
5136       if (auto *CB = dyn_cast<CallBase>(UserI)) {
5137         if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
5138           return true;
        // Record the free call associated with this allocation.
5140         if (isFreeCall(UserI, TLI)) {
5141           if (MustUse) {
5142             FreesForMalloc[&I].insert(UserI);
5143           } else {
5144             LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
5145                               << *UserI << "\n");
5146             ValidUsesOnly = false;
5147           }
5148           return true;
5149         }
5150 
5151         unsigned ArgNo = CB->getArgOperandNo(&U);
5152 
5153         const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
5154             *this, IRPosition::callsite_argument(*CB, ArgNo),
5155             DepClassTy::REQUIRED);
5156 
5157         // If a callsite argument use is nofree, we are fine.
5158         const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
5159             *this, IRPosition::callsite_argument(*CB, ArgNo),
5160             DepClassTy::REQUIRED);
5161 
5162         if (!NoCaptureAA.isAssumedNoCapture() ||
5163             !ArgNoFreeAA.isAssumedNoFree()) {
5164           LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
5165           ValidUsesOnly = false;
5166         }
5167         return true;
5168       }
5169 
5170       if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
5171           isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
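        // Through PHIs and selects the pointer may only reach a later free on
        // some paths, so such a free cannot be attributed to this allocation
        // alone.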
5172         MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
5173         Follow = true;
5174         return true;
5175       }
      // Unknown user for which we cannot track the uses further (in a way that
      // makes sense).
5178       LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
5179       ValidUsesOnly = false;
5180       return true;
5181     };
5182     A.checkForAllUses(Pred, *this, I);
5183     return ValidUsesOnly;
5184   };
5185 
5186   auto MallocCallocCheck = [&](Instruction &I) {
5187     if (BadMallocCalls.count(&I))
5188       return true;
5189 
5190     bool IsMalloc = isMallocLikeFn(&I, TLI);
5191     bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
5192     bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
5193     if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
5194       BadMallocCalls.insert(&I);
5195       return true;
5196     }
5197 
5198     if (IsMalloc) {
5199       if (MaxHeapToStackSize == -1) {
5200         if (UsesCheck(I) || FreeCheck(I)) {
5201           MallocCalls.insert(&I);
5202           return true;
5203         }
5204       }
5205       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
5206         if (Size->getValue().ule(MaxHeapToStackSize))
5207           if (UsesCheck(I) || FreeCheck(I)) {
5208             MallocCalls.insert(&I);
5209             return true;
5210           }
5211     } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
5212       if (MaxHeapToStackSize == -1) {
5213         if (UsesCheck(I) || FreeCheck(I)) {
5214           MallocCalls.insert(&I);
5215           return true;
5216         }
5217       }
5218       // Only if the alignment and sizes are constant.
5219       if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5220         if (Size->getValue().ule(MaxHeapToStackSize))
5221           if (UsesCheck(I) || FreeCheck(I)) {
5222             MallocCalls.insert(&I);
5223             return true;
5224           }
5225     } else if (IsCalloc) {
5226       if (MaxHeapToStackSize == -1) {
5227         if (UsesCheck(I) || FreeCheck(I)) {
5228           MallocCalls.insert(&I);
5229           return true;
5230         }
5231       }
5232       bool Overflow = false;
5233       if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
5234         if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
5235           if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
5236                   .ule(MaxHeapToStackSize))
5237             if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
5238               MallocCalls.insert(&I);
5239               return true;
5240             }
5241     }
5242 
5243     BadMallocCalls.insert(&I);
5244     return true;
5245   };
5246 
5247   size_t NumBadMallocs = BadMallocCalls.size();
5248 
5249   A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);
5250 
5251   if (NumBadMallocs != BadMallocCalls.size())
5252     return ChangeStatus::CHANGED;
5253 
5254   return ChangeStatus::UNCHANGED;
5255 }
5256 
5257 struct AAHeapToStackFunction final : public AAHeapToStackImpl {
5258   AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
5259       : AAHeapToStackImpl(IRP, A) {}
5260 
5261   /// See AbstractAttribute::trackStatistics().
5262   void trackStatistics() const override {
5263     STATS_DECL(
5264         MallocCalls, Function,
5265         "Number of malloc/calloc/aligned_alloc calls converted to allocas");
5266     for (auto *C : MallocCalls)
5267       if (!BadMallocCalls.count(C))
5268         ++BUILD_STAT_NAME(MallocCalls, Function);
5269   }
5270 };
5271 
5272 /// ----------------------- Privatizable Pointers ------------------------------
5273 struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
5274   AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
5275       : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}
5276 
5277   ChangeStatus indicatePessimisticFixpoint() override {
5278     AAPrivatizablePtr::indicatePessimisticFixpoint();
5279     PrivatizableType = nullptr;
5280     return ChangeStatus::CHANGED;
5281   }
5282 
  /// Identify the type we can choose for a private copy of the underlying
5284   /// argument. None means it is not clear yet, nullptr means there is none.
5285   virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;
5286 
5287   /// Return a privatizable type that encloses both T0 and T1.
5288   /// TODO: This is merely a stub for now as we should manage a mapping as well.
5289   Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
5290     if (!T0.hasValue())
5291       return T1;
5292     if (!T1.hasValue())
5293       return T0;
5294     if (T0 == T1)
5295       return T0;
5296     return nullptr;
5297   }
5298 
5299   Optional<Type *> getPrivatizableType() const override {
5300     return PrivatizableType;
5301   }
5302 
5303   const std::string getAsStr() const override {
5304     return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
5305   }
5306 
5307 protected:
5308   Optional<Type *> PrivatizableType;
5309 };
5310 
5311 // TODO: Do this for call site arguments (probably also other values) as well.
5312 
5313 struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
5314   AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
5315       : AAPrivatizablePtrImpl(IRP, A) {}
5316 
5317   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5318   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5319     // If this is a byval argument and we know all the call sites (so we can
5320     // rewrite them), there is no need to check them explicitly.
5321     bool AllCallSitesKnown;
5322     if (getIRPosition().hasAttr(Attribute::ByVal) &&
5323         A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
5324                                true, AllCallSitesKnown))
5325       return getAssociatedValue().getType()->getPointerElementType();
5326 
5327     Optional<Type *> Ty;
5328     unsigned ArgNo = getIRPosition().getCallSiteArgNo();
5329 
5330     // Make sure the associated call site argument has the same type at all call
5331     // sites and it is an allocation we know is safe to privatize, for now that
5332     // means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
5334     //       the type from that information instead. That is a little more
5335     //       involved and will be done in a follow up patch.
5336     auto CallSiteCheck = [&](AbstractCallSite ACS) {
5337       IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is one not
      // associated (which can happen for callback calls).
5340       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
5341         return false;
5342 
5343       // Check that all call sites agree on a type.
5344       auto &PrivCSArgAA =
5345           A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos, DepClassTy::REQUIRED);
5346       Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();
5347 
5348       LLVM_DEBUG({
5349         dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
5350         if (CSTy.hasValue() && CSTy.getValue())
5351           CSTy.getValue()->print(dbgs());
5352         else if (CSTy.hasValue())
5353           dbgs() << "<nullptr>";
5354         else
5355           dbgs() << "<none>";
5356       });
5357 
5358       Ty = combineTypes(Ty, CSTy);
5359 
5360       LLVM_DEBUG({
5361         dbgs() << " : New Type: ";
5362         if (Ty.hasValue() && Ty.getValue())
5363           Ty.getValue()->print(dbgs());
5364         else if (Ty.hasValue())
5365           dbgs() << "<nullptr>";
5366         else
5367           dbgs() << "<none>";
5368         dbgs() << "\n";
5369       });
5370 
5371       return !Ty.hasValue() || Ty.getValue();
5372     };
5373 
5374     if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
5375       return nullptr;
5376     return Ty;
5377   }
5378 
5379   /// See AbstractAttribute::updateImpl(...).
5380   ChangeStatus updateImpl(Attributor &A) override {
5381     PrivatizableType = identifyPrivatizableType(A);
5382     if (!PrivatizableType.hasValue())
5383       return ChangeStatus::UNCHANGED;
5384     if (!PrivatizableType.getValue())
5385       return indicatePessimisticFixpoint();
5386 
    // The alignment dependence is optional so that giving up on the alignment
    // does not force us to give up here as well.
5389     A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
5390                         DepClassTy::OPTIONAL);
5391 
5392     // Avoid arguments with padding for now.
5393     if (!getIRPosition().hasAttr(Attribute::ByVal) &&
5394         !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
5395                                                 A.getInfoCache().getDL())) {
5396       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
5397       return indicatePessimisticFixpoint();
5398     }
5399 
5400     // Verify callee and caller agree on how the promoted argument would be
5401     // passed.
    // TODO: The use of the ArgumentPromotion interface here is ugly; we need a
    // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
    // which doesn't require the arguments ArgumentPromotion wanted to pass.
5405     Function &Fn = *getIRPosition().getAnchorScope();
5406     SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
5407     ArgsToPromote.insert(getAssociatedArgument());
5408     const auto *TTI =
5409         A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
5410     if (!TTI ||
5411         !ArgumentPromotionPass::areFunctionArgsABICompatible(
5412             Fn, *TTI, ArgsToPromote, Dummy) ||
5413         ArgsToPromote.empty()) {
5414       LLVM_DEBUG(
5415           dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
5416                  << Fn.getName() << "\n");
5417       return indicatePessimisticFixpoint();
5418     }
5419 
5420     // Collect the types that will replace the privatizable type in the function
5421     // signature.
5422     SmallVector<Type *, 16> ReplacementTypes;
5423     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5424 
5425     // Register a rewrite of the argument.
5426     Argument *Arg = getAssociatedArgument();
5427     if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
5428       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
5429       return indicatePessimisticFixpoint();
5430     }
5431 
5432     unsigned ArgNo = Arg->getArgNo();
5433 
    // Helper to check whether, at the given call site, the associated argument
    // is passed to a callback where the privatization would be different.
5436     auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
5437       SmallVector<const Use *, 4> CallbackUses;
5438       AbstractCallSite::getCallbackUses(CB, CallbackUses);
5439       for (const Use *U : CallbackUses) {
5440         AbstractCallSite CBACS(U);
5441         assert(CBACS && CBACS.isCallbackCall());
5442         for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
5443           int CBArgNo = CBACS.getCallArgOperandNo(CBArg);
5444 
5445           LLVM_DEBUG({
5446             dbgs()
5447                 << "[AAPrivatizablePtr] Argument " << *Arg
                << " check if it can be privatized in the context of its parent ("
5449                 << Arg->getParent()->getName()
5450                 << ")\n[AAPrivatizablePtr] because it is an argument in a "
5451                    "callback ("
5452                 << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5453                 << ")\n[AAPrivatizablePtr] " << CBArg << " : "
5454                 << CBACS.getCallArgOperand(CBArg) << " vs "
5455                 << CB.getArgOperand(ArgNo) << "\n"
5456                 << "[AAPrivatizablePtr] " << CBArg << " : "
5457                 << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
5458           });
5459 
5460           if (CBArgNo != int(ArgNo))
5461             continue;
5462           const auto &CBArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5463               *this, IRPosition::argument(CBArg), DepClassTy::REQUIRED);
5464           if (CBArgPrivAA.isValidState()) {
5465             auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
5466             if (!CBArgPrivTy.hasValue())
5467               continue;
5468             if (CBArgPrivTy.getValue() == PrivatizableType)
5469               continue;
5470           }
5471 
5472           LLVM_DEBUG({
5473             dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5474                    << " cannot be privatized in the context of its parent ("
5475                    << Arg->getParent()->getName()
5476                    << ")\n[AAPrivatizablePtr] because it is an argument in a "
5477                       "callback ("
5478                    << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
5479                    << ").\n[AAPrivatizablePtr] for which the argument "
5480                       "privatization is not compatible.\n";
5481           });
5482           return false;
5483         }
5484       }
5485       return true;
5486     };
5487 
    // Helper to check whether, at the given call site, the associated argument
    // is passed to a direct call where the privatization would be different.
5490     auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
5491       CallBase *DC = cast<CallBase>(ACS.getInstruction());
5492       int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
5493       assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
5494              "Expected a direct call operand for callback call operand");
5495 
5496       LLVM_DEBUG({
5497         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if it can be privatized in the context of its parent ("
5499                << Arg->getParent()->getName()
5500                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5501                   "direct call of ("
5502                << DCArgNo << "@" << DC->getCalledFunction()->getName()
5503                << ").\n";
5504       });
5505 
5506       Function *DCCallee = DC->getCalledFunction();
5507       if (unsigned(DCArgNo) < DCCallee->arg_size()) {
5508         const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
5509             *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)),
5510             DepClassTy::REQUIRED);
5511         if (DCArgPrivAA.isValidState()) {
5512           auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
5513           if (!DCArgPrivTy.hasValue())
5514             return true;
5515           if (DCArgPrivTy.getValue() == PrivatizableType)
5516             return true;
5517         }
5518       }
5519 
5520       LLVM_DEBUG({
5521         dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
5522                << " cannot be privatized in the context of its parent ("
5523                << Arg->getParent()->getName()
5524                << ")\n[AAPrivatizablePtr] because it is an argument in a "
5525                   "direct call of ("
5526                << ACS.getInstruction()->getCalledFunction()->getName()
5527                << ").\n[AAPrivatizablePtr] for which the argument "
5528                   "privatization is not compatible.\n";
5529       });
5530       return false;
5531     };
5532 
5533     // Helper to check if the associated argument is used at the given abstract
5534     // call site in a way that is incompatible with the privatization assumed
5535     // here.
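    // Note that a direct call site is checked against the callback uses at
    // that call (IsCompatiblePrivArgOfCallback), while a callback call site is
    // checked against the corresponding argument of the underlying direct call
    // (IsCompatiblePrivArgOfDirectCS).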
5536     auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
5537       if (ACS.isDirectCall())
5538         return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
5539       if (ACS.isCallbackCall())
5540         return IsCompatiblePrivArgOfDirectCS(ACS);
5541       return false;
5542     };
5543 
5544     bool AllCallSitesKnown;
5545     if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
5546                                 AllCallSitesKnown))
5547       return indicatePessimisticFixpoint();
5548 
5549     return ChangeStatus::UNCHANGED;
5550   }
5551 
  /// Given a type to privatize \p PrivType, collect its constituents (which are
5553   /// used) in \p ReplacementTypes.
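  /// For example, a struct type {i32, i64} yields the element types i32 and
  /// i64, an array type [4 x float] yields four float entries, and any other
  /// type is kept as the single replacement type.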
5554   static void
5555   identifyReplacementTypes(Type *PrivType,
5556                            SmallVectorImpl<Type *> &ReplacementTypes) {
5557     // TODO: For now we expand the privatization type to the fullest which can
5558     //       lead to dead arguments that need to be removed later.
5559     assert(PrivType && "Expected privatizable type!");
5560 
    // Traverse the type, extract the constituent types on the outermost level.
5562     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5563       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
5564         ReplacementTypes.push_back(PrivStructType->getElementType(u));
5565     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5566       ReplacementTypes.append(PrivArrayType->getNumElements(),
5567                               PrivArrayType->getElementType());
5568     } else {
5569       ReplacementTypes.push_back(PrivType);
5570     }
5571   }
5572 
5573   /// Initialize \p Base according to the type \p PrivType at position \p IP.
5574   /// The values needed are taken from the arguments of \p F starting at
5575   /// position \p ArgNo.
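  /// For aggregate types one store per constituent element is emitted into a
  /// GEP at the corresponding offset of \p Base, consuming the arguments at
  /// positions ArgNo, ArgNo + 1, and so on; otherwise a single store of the
  /// argument at position ArgNo into \p Base is created.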
5576   static void createInitialization(Type *PrivType, Value &Base, Function &F,
5577                                    unsigned ArgNo, Instruction &IP) {
5578     assert(PrivType && "Expected privatizable type!");
5579 
5580     IRBuilder<NoFolder> IRB(&IP);
5581     const DataLayout &DL = F.getParent()->getDataLayout();
5582 
5583     // Traverse the type, build GEPs and stores.
5584     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5585       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5586       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5587         Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
5588         Value *Ptr =
5589             constructPointer(PointeeTy, PrivType, &Base,
5590                              PrivStructLayout->getElementOffset(u), IRB, DL);
5591         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5592       }
5593     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5594       Type *PointeeTy = PrivArrayType->getElementType();
5595       Type *PointeePtrTy = PointeeTy->getPointerTo();
5596       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5597       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5598         Value *Ptr = constructPointer(PointeePtrTy, PrivType, &Base,
5599                                       u * PointeeTySize, IRB, DL);
5600         new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
5601       }
5602     } else {
5603       new StoreInst(F.getArg(ArgNo), &Base, &IP);
5604     }
5605   }
5606 
5607   /// Extract values from \p Base according to the type \p PrivType at the
5608   /// call position \p ACS. The values are appended to \p ReplacementValues.
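  /// This mirrors createInitialization: one load per constituent element is
  /// created for aggregate types, otherwise a single load of \p PrivType, each
  /// with alignment \p Alignment.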
5609   void createReplacementValues(Align Alignment, Type *PrivType,
5610                                AbstractCallSite ACS, Value *Base,
5611                                SmallVectorImpl<Value *> &ReplacementValues) {
5612     assert(Base && "Expected base value!");
5613     assert(PrivType && "Expected privatizable type!");
5614     Instruction *IP = ACS.getInstruction();
5615 
5616     IRBuilder<NoFolder> IRB(IP);
5617     const DataLayout &DL = IP->getModule()->getDataLayout();
5618 
5619     if (Base->getType()->getPointerElementType() != PrivType)
5620       Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
5621                                                  "", ACS.getInstruction());
5622 
5623     // Traverse the type, build GEPs and loads.
5624     if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
5625       const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
5626       for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
5627         Type *PointeeTy = PrivStructType->getElementType(u);
5628         Value *Ptr =
5629             constructPointer(PointeeTy->getPointerTo(), PrivType, Base,
5630                              PrivStructLayout->getElementOffset(u), IRB, DL);
5631         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5632         L->setAlignment(Alignment);
5633         ReplacementValues.push_back(L);
5634       }
5635     } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
5636       Type *PointeeTy = PrivArrayType->getElementType();
5637       uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
5638       Type *PointeePtrTy = PointeeTy->getPointerTo();
5639       for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
5640         Value *Ptr = constructPointer(PointeePtrTy, PrivType, Base,
5641                                       u * PointeeTySize, IRB, DL);
5642         LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
5643         L->setAlignment(Alignment);
5644         ReplacementValues.push_back(L);
5645       }
5646     } else {
5647       LoadInst *L = new LoadInst(PrivType, Base, "", IP);
5648       L->setAlignment(Alignment);
5649       ReplacementValues.push_back(L);
5650     }
5651   }
5652 
5653   /// See AbstractAttribute::manifest(...)
5654   ChangeStatus manifest(Attributor &A) override {
5655     if (!PrivatizableType.hasValue())
5656       return ChangeStatus::UNCHANGED;
5657     assert(PrivatizableType.getValue() && "Expected privatizable type!");
5658 
5659     // Collect all tail calls in the function as we cannot allow new allocas to
    // escape into tail calls.
5661     // TODO: Be smarter about new allocas escaping into tail calls.
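    // The tail markers on the collected calls are cleared conservatively in
    // FnRepairCB below because the new entry-block alloca may reach them and a
    // tail call must not access the caller's stack.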
5662     SmallVector<CallInst *, 16> TailCalls;
5663     if (!A.checkForAllInstructions(
5664             [&](Instruction &I) {
5665               CallInst &CI = cast<CallInst>(I);
5666               if (CI.isTailCall())
5667                 TailCalls.push_back(&CI);
5668               return true;
5669             },
5670             *this, {Instruction::Call}))
5671       return ChangeStatus::UNCHANGED;
5672 
5673     Argument *Arg = getAssociatedArgument();
5674     // Query AAAlign attribute for alignment of associated argument to
5675     // determine the best alignment of loads.
5676     const auto &AlignAA =
5677         A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg), DepClassTy::NONE);
5678 
    // Callback to repair the associated function. A new alloca is placed at the
    // beginning and initialized with the values passed through arguments. The
    // new alloca replaces all uses of the old pointer argument.
5682     Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
5683         [=](const Attributor::ArgumentReplacementInfo &ARI,
5684             Function &ReplacementFn, Function::arg_iterator ArgIt) {
5685           BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
5686           Instruction *IP = &*EntryBB.getFirstInsertionPt();
5687           Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
5688                                            Arg->getName() + ".priv", IP);
5689           createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
5690                                ArgIt->getArgNo(), *IP);
5691 
5692           if (AI->getType() != Arg->getType())
5693             AI =
5694                 BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
5695           Arg->replaceAllUsesWith(AI);
5696 
5697           for (CallInst *CI : TailCalls)
5698             CI->setTailCall(false);
5699         };
5700 
5701     // Callback to repair a call site of the associated function. The elements
5702     // of the privatizable type are loaded prior to the call and passed to the
5703     // new function version.
5704     Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
5705         [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
5706                       AbstractCallSite ACS,
5707                       SmallVectorImpl<Value *> &NewArgOperands) {
5708           // When no alignment is specified for the load instruction,
5709           // natural alignment is assumed.
5710           createReplacementValues(
5711               assumeAligned(AlignAA.getAssumedAlign()),
5712               PrivatizableType.getValue(), ACS,
5713               ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
5714               NewArgOperands);
5715         };
5716 
5717     // Collect the types that will replace the privatizable type in the function
5718     // signature.
5719     SmallVector<Type *, 16> ReplacementTypes;
5720     identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);
5721 
5722     // Register a rewrite of the argument.
5723     if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
5724                                            std::move(FnRepairCB),
5725                                            std::move(ACSRepairCB)))
5726       return ChangeStatus::CHANGED;
5727     return ChangeStatus::UNCHANGED;
5728   }
5729 
5730   /// See AbstractAttribute::trackStatistics()
5731   void trackStatistics() const override {
5732     STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
5733   }
5734 };
5735 
5736 struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
5737   AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
5738       : AAPrivatizablePtrImpl(IRP, A) {}
5739 
5740   /// See AbstractAttribute::initialize(...).
5741   virtual void initialize(Attributor &A) override {
5742     // TODO: We can privatize more than arguments.
5743     indicatePessimisticFixpoint();
5744   }
5745 
5746   ChangeStatus updateImpl(Attributor &A) override {
5747     llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
5748                      "updateImpl will not be called");
5749   }
5750 
5751   /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
5752   Optional<Type *> identifyPrivatizableType(Attributor &A) override {
5753     Value *Obj = getUnderlyingObject(&getAssociatedValue());
5754     if (!Obj) {
5755       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
5756       return nullptr;
5757     }
5758 
5759     if (auto *AI = dyn_cast<AllocaInst>(Obj))
5760       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
5761         if (CI->isOne())
5762           return Obj->getType()->getPointerElementType();
5763     if (auto *Arg = dyn_cast<Argument>(Obj)) {
5764       auto &PrivArgAA = A.getAAFor<AAPrivatizablePtr>(
5765           *this, IRPosition::argument(*Arg), DepClassTy::REQUIRED);
5766       if (PrivArgAA.isAssumedPrivatizablePtr())
5767         return Obj->getType()->getPointerElementType();
5768     }
5769 
5770     LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
5771                          "alloca nor privatizable argument: "
5772                       << *Obj << "!\n");
5773     return nullptr;
5774   }
5775 
5776   /// See AbstractAttribute::trackStatistics()
5777   void trackStatistics() const override {
5778     STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
5779   }
5780 };
5781 
5782 struct AAPrivatizablePtrCallSiteArgument final
5783     : public AAPrivatizablePtrFloating {
5784   AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
5785       : AAPrivatizablePtrFloating(IRP, A) {}
5786 
5787   /// See AbstractAttribute::initialize(...).
5788   void initialize(Attributor &A) override {
5789     if (getIRPosition().hasAttr(Attribute::ByVal))
5790       indicateOptimisticFixpoint();
5791   }
5792 
5793   /// See AbstractAttribute::updateImpl(...).
5794   ChangeStatus updateImpl(Attributor &A) override {
5795     PrivatizableType = identifyPrivatizableType(A);
5796     if (!PrivatizableType.hasValue())
5797       return ChangeStatus::UNCHANGED;
5798     if (!PrivatizableType.getValue())
5799       return indicatePessimisticFixpoint();
5800 
5801     const IRPosition &IRP = getIRPosition();
5802     auto &NoCaptureAA =
5803         A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::REQUIRED);
5804     if (!NoCaptureAA.isAssumedNoCapture()) {
5805       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
5806       return indicatePessimisticFixpoint();
5807     }
5808 
5809     auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP, DepClassTy::REQUIRED);
5810     if (!NoAliasAA.isAssumedNoAlias()) {
5811       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
5812       return indicatePessimisticFixpoint();
5813     }
5814 
5815     const auto &MemBehaviorAA =
5816         A.getAAFor<AAMemoryBehavior>(*this, IRP, DepClassTy::REQUIRED);
5817     if (!MemBehaviorAA.isAssumedReadOnly()) {
5818       LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
5819       return indicatePessimisticFixpoint();
5820     }
5821 
5822     return ChangeStatus::UNCHANGED;
5823   }
5824 
5825   /// See AbstractAttribute::trackStatistics()
5826   void trackStatistics() const override {
5827     STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
5828   }
5829 };
5830 
5831 struct AAPrivatizablePtrCallSiteReturned final
5832     : public AAPrivatizablePtrFloating {
5833   AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
5834       : AAPrivatizablePtrFloating(IRP, A) {}
5835 
5836   /// See AbstractAttribute::initialize(...).
5837   void initialize(Attributor &A) override {
5838     // TODO: We can privatize more than arguments.
5839     indicatePessimisticFixpoint();
5840   }
5841 
5842   /// See AbstractAttribute::trackStatistics()
5843   void trackStatistics() const override {
5844     STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
5845   }
5846 };
5847 
5848 struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
5849   AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
5850       : AAPrivatizablePtrFloating(IRP, A) {}
5851 
5852   /// See AbstractAttribute::initialize(...).
5853   void initialize(Attributor &A) override {
5854     // TODO: We can privatize more than arguments.
5855     indicatePessimisticFixpoint();
5856   }
5857 
5858   /// See AbstractAttribute::trackStatistics()
5859   void trackStatistics() const override {
5860     STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
5861   }
5862 };
5863 
5864 /// -------------------- Memory Behavior Attributes ----------------------------
5865 /// Includes read-none, read-only, and write-only.
5866 /// ----------------------------------------------------------------------------
5867 struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
5868   AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
5869       : AAMemoryBehavior(IRP, A) {}
5870 
5871   /// See AbstractAttribute::initialize(...).
5872   void initialize(Attributor &A) override {
5873     intersectAssumedBits(BEST_STATE);
5874     getKnownStateFromValue(getIRPosition(), getState());
5875     AAMemoryBehavior::initialize(A);
5876   }
5877 
5878   /// Return the memory behavior information encoded in the IR for \p IRP.
5879   static void getKnownStateFromValue(const IRPosition &IRP,
5880                                      BitIntegerState &State,
5881                                      bool IgnoreSubsumingPositions = false) {
5882     SmallVector<Attribute, 2> Attrs;
5883     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
5884     for (const Attribute &Attr : Attrs) {
5885       switch (Attr.getKindAsEnum()) {
5886       case Attribute::ReadNone:
5887         State.addKnownBits(NO_ACCESSES);
5888         break;
5889       case Attribute::ReadOnly:
5890         State.addKnownBits(NO_WRITES);
5891         break;
5892       case Attribute::WriteOnly:
5893         State.addKnownBits(NO_READS);
5894         break;
5895       default:
5896         llvm_unreachable("Unexpected attribute!");
5897       }
5898     }
5899 
5900     if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
5901       if (!I->mayReadFromMemory())
5902         State.addKnownBits(NO_READS);
5903       if (!I->mayWriteToMemory())
5904         State.addKnownBits(NO_WRITES);
5905     }
5906   }
5907 
5908   /// See AbstractAttribute::getDeducedAttributes(...).
5909   void getDeducedAttributes(LLVMContext &Ctx,
5910                             SmallVectorImpl<Attribute> &Attrs) const override {
5911     assert(Attrs.size() == 0);
5912     if (isAssumedReadNone())
5913       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
5914     else if (isAssumedReadOnly())
5915       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
5916     else if (isAssumedWriteOnly())
5917       Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
5918     assert(Attrs.size() <= 1);
5919   }
5920 
5921   /// See AbstractAttribute::manifest(...).
5922   ChangeStatus manifest(Attributor &A) override {
5923     if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
5924       return ChangeStatus::UNCHANGED;
5925 
5926     const IRPosition &IRP = getIRPosition();
5927 
5928     // Check if we would improve the existing attributes first.
5929     SmallVector<Attribute, 4> DeducedAttrs;
5930     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
5931     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
5932           return IRP.hasAttr(Attr.getKindAsEnum(),
5933                              /* IgnoreSubsumingPositions */ true);
5934         }))
5935       return ChangeStatus::UNCHANGED;
5936 
5937     // Clear existing attributes.
5938     IRP.removeAttrs(AttrKinds);
5939 
5940     // Use the generic manifest method.
5941     return IRAttribute::manifest(A);
5942   }
5943 
5944   /// See AbstractState::getAsStr().
5945   const std::string getAsStr() const override {
5946     if (isAssumedReadNone())
5947       return "readnone";
5948     if (isAssumedReadOnly())
5949       return "readonly";
5950     if (isAssumedWriteOnly())
5951       return "writeonly";
5952     return "may-read/write";
5953   }
5954 
5955   /// The set of IR attributes AAMemoryBehavior deals with.
5956   static const Attribute::AttrKind AttrKinds[3];
5957 };
5958 
5959 const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
5960     Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};
5961 
5962 /// Memory behavior attribute for a floating value.
5963 struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
5964   AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
5965       : AAMemoryBehaviorImpl(IRP, A) {}
5966 
5967   /// See AbstractAttribute::initialize(...).
5968   void initialize(Attributor &A) override {
5969     AAMemoryBehaviorImpl::initialize(A);
5970     addUsesOf(A, getAssociatedValue());
5971   }
5972 
5973   /// See AbstractAttribute::updateImpl(...).
5974   ChangeStatus updateImpl(Attributor &A) override;
5975 
5976   /// See AbstractAttribute::trackStatistics()
5977   void trackStatistics() const override {
5978     if (isAssumedReadNone())
5979       STATS_DECLTRACK_FLOATING_ATTR(readnone)
5980     else if (isAssumedReadOnly())
5981       STATS_DECLTRACK_FLOATING_ATTR(readonly)
5982     else if (isAssumedWriteOnly())
5983       STATS_DECLTRACK_FLOATING_ATTR(writeonly)
5984   }
5985 
5986 private:
5987   /// Return true if users of \p UserI might access the underlying
5988   /// variable/location described by \p U and should therefore be analyzed.
5989   bool followUsersOfUseIn(Attributor &A, const Use *U,
5990                           const Instruction *UserI);
5991 
5992   /// Update the state according to the effect of use \p U in \p UserI.
5993   void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);
5994 
5995 protected:
5996   /// Add the uses of \p V to the `Uses` set we look at during the update step.
5997   void addUsesOf(Attributor &A, const Value &V);
5998 
5999   /// Container for (transitive) uses of the associated argument.
6000   SmallVector<const Use *, 8> Uses;
6001 
6002   /// Set to remember the uses we already traversed.
6003   SmallPtrSet<const Use *, 8> Visited;
6004 };
6005 
6006 /// Memory behavior attribute for function argument.
6007 struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
6008   AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
6009       : AAMemoryBehaviorFloating(IRP, A) {}
6010 
6011   /// See AbstractAttribute::initialize(...).
6012   void initialize(Attributor &A) override {
6013     intersectAssumedBits(BEST_STATE);
6014     const IRPosition &IRP = getIRPosition();
6015     // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
6016     // can query it when we use has/getAttr. That would allow us to reuse the
6017     // initialize of the base class here.
6018     bool HasByVal =
6019         IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
6020     getKnownStateFromValue(IRP, getState(),
6021                            /* IgnoreSubsumingPositions */ HasByVal);
6022 
6023     // Initialize the use vector with all direct uses of the associated value.
6024     Argument *Arg = getAssociatedArgument();
6025     if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
6026       indicatePessimisticFixpoint();
6027     } else {
6028       addUsesOf(A, *Arg);
6029     }
6030   }
6031 
6032   ChangeStatus manifest(Attributor &A) override {
6033     // TODO: Pointer arguments are not supported on vectors of pointers yet.
6034     if (!getAssociatedValue().getType()->isPointerTy())
6035       return ChangeStatus::UNCHANGED;
6036 
6037     // TODO: From readattrs.ll: "inalloca parameters are always
6038     //                           considered written"
6039     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
6040       removeKnownBits(NO_WRITES);
6041       removeAssumedBits(NO_WRITES);
6042     }
6043     return AAMemoryBehaviorFloating::manifest(A);
6044   }
6045 
6046   /// See AbstractAttribute::trackStatistics()
6047   void trackStatistics() const override {
6048     if (isAssumedReadNone())
6049       STATS_DECLTRACK_ARG_ATTR(readnone)
6050     else if (isAssumedReadOnly())
6051       STATS_DECLTRACK_ARG_ATTR(readonly)
6052     else if (isAssumedWriteOnly())
6053       STATS_DECLTRACK_ARG_ATTR(writeonly)
6054   }
6055 };
6056 
6057 struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
6058   AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
6059       : AAMemoryBehaviorArgument(IRP, A) {}
6060 
6061   /// See AbstractAttribute::initialize(...).
6062   void initialize(Attributor &A) override {
    // If we don't have an associated argument this is either a variadic call
    // or an indirect call; either way, nothing to do here.
6065     Argument *Arg = getAssociatedArgument();
6066     if (!Arg) {
6067       indicatePessimisticFixpoint();
6068       return;
6069     }
6070     if (Arg->hasByValAttr()) {
6071       addKnownBits(NO_WRITES);
6072       removeKnownBits(NO_READS);
6073       removeAssumedBits(NO_READS);
6074     }
6075     AAMemoryBehaviorArgument::initialize(A);
6076     if (getAssociatedFunction()->isDeclaration())
6077       indicatePessimisticFixpoint();
6078   }
6079 
6080   /// See AbstractAttribute::updateImpl(...).
6081   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6086     Argument *Arg = getAssociatedArgument();
6087     const IRPosition &ArgPos = IRPosition::argument(*Arg);
6088     auto &ArgAA =
6089         A.getAAFor<AAMemoryBehavior>(*this, ArgPos, DepClassTy::REQUIRED);
6090     return clampStateAndIndicateChange(getState(), ArgAA.getState());
6091   }
6092 
6093   /// See AbstractAttribute::trackStatistics()
6094   void trackStatistics() const override {
6095     if (isAssumedReadNone())
6096       STATS_DECLTRACK_CSARG_ATTR(readnone)
6097     else if (isAssumedReadOnly())
6098       STATS_DECLTRACK_CSARG_ATTR(readonly)
6099     else if (isAssumedWriteOnly())
6100       STATS_DECLTRACK_CSARG_ATTR(writeonly)
6101   }
6102 };
6103 
6104 /// Memory behavior attribute for a call site return position.
6105 struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
6106   AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
6107       : AAMemoryBehaviorFloating(IRP, A) {}
6108 
6109   /// See AbstractAttribute::initialize(...).
6110   void initialize(Attributor &A) override {
6111     AAMemoryBehaviorImpl::initialize(A);
6112     Function *F = getAssociatedFunction();
6113     if (!F || F->isDeclaration())
6114       indicatePessimisticFixpoint();
6115   }
6116 
6117   /// See AbstractAttribute::manifest(...).
6118   ChangeStatus manifest(Attributor &A) override {
6119     // We do not annotate returned values.
6120     return ChangeStatus::UNCHANGED;
6121   }
6122 
6123   /// See AbstractAttribute::trackStatistics()
6124   void trackStatistics() const override {}
6125 };
6126 
6127 /// An AA to represent the memory behavior function attributes.
6128 struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
6129   AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
6130       : AAMemoryBehaviorImpl(IRP, A) {}
6131 
6132   /// See AbstractAttribute::updateImpl(Attributor &A).
6133   virtual ChangeStatus updateImpl(Attributor &A) override;
6134 
6135   /// See AbstractAttribute::manifest(...).
6136   ChangeStatus manifest(Attributor &A) override {
6137     Function &F = cast<Function>(getAnchorValue());
6138     if (isAssumedReadNone()) {
6139       F.removeFnAttr(Attribute::ArgMemOnly);
6140       F.removeFnAttr(Attribute::InaccessibleMemOnly);
6141       F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
6142     }
6143     return AAMemoryBehaviorImpl::manifest(A);
6144   }
6145 
6146   /// See AbstractAttribute::trackStatistics()
6147   void trackStatistics() const override {
6148     if (isAssumedReadNone())
6149       STATS_DECLTRACK_FN_ATTR(readnone)
6150     else if (isAssumedReadOnly())
6151       STATS_DECLTRACK_FN_ATTR(readonly)
6152     else if (isAssumedWriteOnly())
6153       STATS_DECLTRACK_FN_ATTR(writeonly)
6154   }
6155 };
6156 
6157 /// AAMemoryBehavior attribute for call sites.
6158 struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
6159   AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
6160       : AAMemoryBehaviorImpl(IRP, A) {}
6161 
6162   /// See AbstractAttribute::initialize(...).
6163   void initialize(Attributor &A) override {
6164     AAMemoryBehaviorImpl::initialize(A);
6165     Function *F = getAssociatedFunction();
6166     if (!F || F->isDeclaration())
6167       indicatePessimisticFixpoint();
6168   }
6169 
6170   /// See AbstractAttribute::updateImpl(...).
6171   ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
6176     Function *F = getAssociatedFunction();
6177     const IRPosition &FnPos = IRPosition::function(*F);
6178     auto &FnAA =
6179         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::REQUIRED);
6180     return clampStateAndIndicateChange(getState(), FnAA.getState());
6181   }
6182 
6183   /// See AbstractAttribute::trackStatistics()
6184   void trackStatistics() const override {
6185     if (isAssumedReadNone())
6186       STATS_DECLTRACK_CS_ATTR(readnone)
6187     else if (isAssumedReadOnly())
6188       STATS_DECLTRACK_CS_ATTR(readonly)
6189     else if (isAssumedWriteOnly())
6190       STATS_DECLTRACK_CS_ATTR(writeonly)
6191   }
6192 };
6193 
6194 ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {
6195 
6196   // The current assumed state used to determine a change.
6197   auto AssumedState = getAssumed();
6198 
6199   auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has its own memory behavior state, use it to restrict
6201     // the local state. No further analysis is required as the other memory
6202     // state is as optimistic as it gets.
6203     if (const auto *CB = dyn_cast<CallBase>(&I)) {
6204       const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
6205           *this, IRPosition::callsite_function(*CB), DepClassTy::REQUIRED);
6206       intersectAssumedBits(MemBehaviorAA.getAssumed());
6207       return !isAtFixpoint();
6208     }
6209 
6210     // Remove access kind modifiers if necessary.
6211     if (I.mayReadFromMemory())
6212       removeAssumedBits(NO_READS);
6213     if (I.mayWriteToMemory())
6214       removeAssumedBits(NO_WRITES);
6215     return !isAtFixpoint();
6216   };
6217 
6218   if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6219     return indicatePessimisticFixpoint();
6220 
6221   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6222                                         : ChangeStatus::UNCHANGED;
6223 }
6224 
6225 ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {
6226 
6227   const IRPosition &IRP = getIRPosition();
6228   const IRPosition &FnPos = IRPosition::function_scope(IRP);
6229   AAMemoryBehavior::StateType &S = getState();
6230 
  // First, check the function scope. We take the known information and we avoid
  // work if the assumed information implies the current assumed information for
  // this attribute. This is valid for all but byval arguments.
6234   Argument *Arg = IRP.getAssociatedArgument();
6235   AAMemoryBehavior::base_t FnMemAssumedState =
6236       AAMemoryBehavior::StateType::getWorstState();
6237   if (!Arg || !Arg->hasByValAttr()) {
6238     const auto &FnMemAA =
6239         A.getAAFor<AAMemoryBehavior>(*this, FnPos, DepClassTy::OPTIONAL);
6240     FnMemAssumedState = FnMemAA.getAssumed();
6241     S.addKnownBits(FnMemAA.getKnown());
6242     if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
6243       return ChangeStatus::UNCHANGED;
6244   }
6245 
  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
6250   const auto &ArgNoCaptureAA =
6251       A.getAAFor<AANoCapture>(*this, IRP, DepClassTy::OPTIONAL);
6252   if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
6253     S.intersectAssumedBits(FnMemAssumedState);
6254     return ChangeStatus::CHANGED;
6255   }
6256 
6257   // The current assumed state used to determine a change.
6258   auto AssumedState = S.getAssumed();
6259 
6260   // Liveness information to exclude dead users.
6261   // TODO: Take the FnPos once we have call site specific liveness information.
6262   const auto &LivenessAA = A.getAAFor<AAIsDead>(
6263       *this, IRPosition::function(*IRP.getAssociatedFunction()),
6264       DepClassTy::NONE);
6265 
6266   // Visit and expand uses until all are analyzed or a fixpoint is reached.
6267   for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
6268     const Use *U = Uses[i];
6269     Instruction *UserI = cast<Instruction>(U->getUser());
6270     LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
6271                       << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
6272                       << "]\n");
6273     if (A.isAssumedDead(*U, this, &LivenessAA))
6274       continue;
6275 
    // Droppable users, e.g., llvm::assume, do not actually perform any action.
6277     if (UserI->isDroppable())
6278       continue;
6279 
6280     // Check if the users of UserI should also be visited.
6281     if (followUsersOfUseIn(A, U, UserI))
6282       addUsesOf(A, *UserI);
6283 
6284     // If UserI might touch memory we analyze the use in detail.
6285     if (UserI->mayReadOrWriteMemory())
6286       analyzeUseIn(A, U, UserI);
6287   }
6288 
6289   return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
6290                                         : ChangeStatus::UNCHANGED;
6291 }
6292 
6293 void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
6294   SmallVector<const Use *, 8> WL;
6295   for (const Use &U : V.uses())
6296     WL.push_back(&U);
6297 
6298   while (!WL.empty()) {
6299     const Use *U = WL.pop_back_val();
6300     if (!Visited.insert(U).second)
6301       continue;
6302 
6303     const Instruction *UserI = cast<Instruction>(U->getUser());
6304     if (UserI->mayReadOrWriteMemory()) {
6305       Uses.push_back(U);
6306       continue;
6307     }
6308     if (!followUsersOfUseIn(A, U, UserI))
6309       continue;
6310     for (const Use &UU : UserI->uses())
6311       WL.push_back(&UU);
6312   }
6313 }
6314 
6315 bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
6316                                                   const Instruction *UserI) {
6317   // The loaded value is unrelated to the pointer argument, no need to
6318   // follow the users of the load.
6319   if (isa<LoadInst>(UserI))
6320     return false;
6321 
  // By default we follow all uses assuming UserI might leak information on U;
  // we have special handling for call site operands though.
6324   const auto *CB = dyn_cast<CallBase>(UserI);
6325   if (!CB || !CB->isArgOperand(U))
6326     return true;
6327 
  // If the use is a call argument known not to be captured, the users of
  // the call do not need to be visited because they have to be unrelated to
  // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
6334   if (U->get()->getType()->isPointerTy()) {
6335     unsigned ArgNo = CB->getArgOperandNo(U);
6336     const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
6337         *this, IRPosition::callsite_argument(*CB, ArgNo), DepClassTy::OPTIONAL);
6338     return !ArgNoCaptureAA.isAssumedNoCapture();
6339   }
6340 
6341   return true;
6342 }
6343 
6344 void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
6345                                             const Instruction *UserI) {
6346   assert(UserI->mayReadOrWriteMemory());
6347 
6348   switch (UserI->getOpcode()) {
6349   default:
6350     // TODO: Handle all atomics and other side-effect operations we know of.
6351     break;
6352   case Instruction::Load:
6353     // Loads cause the NO_READS property to disappear.
6354     removeAssumedBits(NO_READS);
6355     return;
6356 
6357   case Instruction::Store:
6358     // Stores cause the NO_WRITES property to disappear if the use is the
6359     // pointer operand. Note that we do assume that capturing was taken care of
6360     // somewhere else.
6361     if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
6362       removeAssumedBits(NO_WRITES);
6363     return;
6364 
6365   case Instruction::Call:
6366   case Instruction::CallBr:
6367   case Instruction::Invoke: {
6368     // For call sites we look at the argument memory behavior attribute (this
6369     // could be recursive!) in order to restrict our own state.
6370     const auto *CB = cast<CallBase>(UserI);
6371 
6372     // Give up on operand bundles.
6373     if (CB->isBundleOperand(U)) {
6374       indicatePessimisticFixpoint();
6375       return;
6376     }
6377 
6378     // Calling a function does read the function pointer, maybe write it if the
6379     // function is self-modifying.
6380     if (CB->isCallee(U)) {
6381       removeAssumedBits(NO_READS);
6382       break;
6383     }
6384 
6385     // Adjust the possible access behavior based on the information on the
6386     // argument.
6387     IRPosition Pos;
6388     if (U->get()->getType()->isPointerTy())
6389       Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
6390     else
6391       Pos = IRPosition::callsite_function(*CB);
6392     const auto &MemBehaviorAA =
6393         A.getAAFor<AAMemoryBehavior>(*this, Pos, DepClassTy::OPTIONAL);
6394     // "assumed" has at most the same bits as the MemBehaviorAA assumed
6395     // and at least "known".
6396     intersectAssumedBits(MemBehaviorAA.getAssumed());
6397     return;
6398   }
6399   };
6400 
6401   // Generally, look at the "may-properties" and adjust the assumed state if we
6402   // did not trigger special handling before.
6403   if (UserI->mayReadFromMemory())
6404     removeAssumedBits(NO_READS);
6405   if (UserI->mayWriteToMemory())
6406     removeAssumedBits(NO_WRITES);
6407 }
6408 
6409 } // namespace
6410 
6411 /// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly, and
/// inaccessiblemem_or_argmemonly.
6414 /// ----------------------------------------------------------------------------
6415 
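// Note that MLK encodes the locations known *not* to be accessed; the returned
// string lists the location kinds whose NO_* bit is cleared, i.e., those that
// may still be accessed.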
6416 std::string AAMemoryLocation::getMemoryLocationsAsStr(
6417     AAMemoryLocation::MemoryLocationsKind MLK) {
6418   if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
6419     return "all memory";
6420   if (MLK == AAMemoryLocation::NO_LOCATIONS)
6421     return "no memory";
6422   std::string S = "memory:";
6423   if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
6424     S += "stack,";
6425   if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
6426     S += "constant,";
6427   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
6428     S += "internal global,";
6429   if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
6430     S += "external global,";
6431   if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
6432     S += "argument,";
6433   if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
6434     S += "inaccessible,";
6435   if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
6436     S += "malloced,";
6437   if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
6438     S += "unknown,";
6439   S.pop_back();
6440   return S;
6441 }
6442 
6443 namespace {
6444 struct AAMemoryLocationImpl : public AAMemoryLocation {
6445 
6446   AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
6447       : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
6448     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6449       AccessKind2Accesses[u] = nullptr;
6450   }
6451 
6452   ~AAMemoryLocationImpl() {
6453     // The AccessSets are allocated via a BumpPtrAllocator, we call
6454     // the destructor manually.
6455     for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
6456       if (AccessKind2Accesses[u])
6457         AccessKind2Accesses[u]->~AccessSet();
6458   }
6459 
6460   /// See AbstractAttribute::initialize(...).
6461   void initialize(Attributor &A) override {
6462     intersectAssumedBits(BEST_STATE);
6463     getKnownStateFromValue(A, getIRPosition(), getState());
6464     AAMemoryLocation::initialize(A);
6465   }
6466 
6467   /// Return the memory behavior information encoded in the IR for \p IRP.
6468   static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
6469                                      BitIntegerState &State,
6470                                      bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblemem_or_argmemonly` as we might break them via interprocedural
    // constant propagation. It is unclear if this is the best way but it is
    // unlikely this will cause real performance problems. If we are deriving
    // attributes for the anchor function we even remove the attribute in
    // addition to ignoring it.
6477     bool UseArgMemOnly = true;
6478     Function *AnchorFn = IRP.getAnchorScope();
6479     if (AnchorFn && A.isRunOn(*AnchorFn))
6480       UseArgMemOnly = !AnchorFn->hasLocalLinkage();
6481 
6482     SmallVector<Attribute, 2> Attrs;
6483     IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
6484     for (const Attribute &Attr : Attrs) {
6485       switch (Attr.getKindAsEnum()) {
6486       case Attribute::ReadNone:
6487         State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
6488         break;
6489       case Attribute::InaccessibleMemOnly:
6490         State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
6491         break;
6492       case Attribute::ArgMemOnly:
6493         if (UseArgMemOnly)
6494           State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
6495         else
6496           IRP.removeAttrs({Attribute::ArgMemOnly});
6497         break;
6498       case Attribute::InaccessibleMemOrArgMemOnly:
6499         if (UseArgMemOnly)
6500           State.addKnownBits(inverseLocation(
6501               NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
6502         else
6503           IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
6504         break;
6505       default:
6506         llvm_unreachable("Unexpected attribute!");
6507       }
6508     }
6509   }
6510 
6511   /// See AbstractAttribute::getDeducedAttributes(...).
6512   void getDeducedAttributes(LLVMContext &Ctx,
6513                             SmallVectorImpl<Attribute> &Attrs) const override {
6514     assert(Attrs.size() == 0);
6515     if (isAssumedReadNone()) {
6516       Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
6517     } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
6518       if (isAssumedInaccessibleMemOnly())
6519         Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
6520       else if (isAssumedArgMemOnly())
6521         Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
6522       else if (isAssumedInaccessibleOrArgMemOnly())
6523         Attrs.push_back(
6524             Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
6525     }
6526     assert(Attrs.size() <= 1);
6527   }
6528 
6529   /// See AbstractAttribute::manifest(...).
6530   ChangeStatus manifest(Attributor &A) override {
6531     const IRPosition &IRP = getIRPosition();
6532 
6533     // Check if we would improve the existing attributes first.
6534     SmallVector<Attribute, 4> DeducedAttrs;
6535     getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
6536     if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
6537           return IRP.hasAttr(Attr.getKindAsEnum(),
6538                              /* IgnoreSubsumingPositions */ true);
6539         }))
6540       return ChangeStatus::UNCHANGED;
6541 
6542     // Clear existing attributes.
6543     IRP.removeAttrs(AttrKinds);
6544     if (isAssumedReadNone())
6545       IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);
6546 
6547     // Use the generic manifest method.
6548     return IRAttribute::manifest(A);
6549   }
6550 
6551   /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
6552   bool checkForAllAccessesToMemoryKind(
6553       function_ref<bool(const Instruction *, const Value *, AccessKind,
6554                         MemoryLocationsKind)>
6555           Pred,
6556       MemoryLocationsKind RequestedMLK) const override {
6557     if (!isValidState())
6558       return false;
6559 
6560     MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
6561     if (AssumedMLK == NO_LOCATIONS)
6562       return true;
6563 
6564     unsigned Idx = 0;
6565     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
6566          CurMLK *= 2, ++Idx) {
6567       if (CurMLK & RequestedMLK)
6568         continue;
6569 
6570       if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
6571         for (const AccessInfo &AI : *Accesses)
6572           if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
6573             return false;
6574     }
6575 
6576     return true;
6577   }
6578 
6579   ChangeStatus indicatePessimisticFixpoint() override {
6580     // If we give up and indicate a pessimistic fixpoint this instruction will
6581     // become an access for all potential access kinds:
6582     // TODO: Add pointers for argmemonly and globals to improve the results of
6583     //       checkForAllAccessesToMemoryKind.
6584     bool Changed = false;
6585     MemoryLocationsKind KnownMLK = getKnown();
6586     Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
6587     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
6588       if (!(CurMLK & KnownMLK))
6589         updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
6590                                   getAccessKindFromInst(I));
6591     return AAMemoryLocation::indicatePessimisticFixpoint();
6592   }
6593 
6594 protected:
6595   /// Helper struct to tie together an instruction that has a read or write
6596   /// effect with the pointer it accesses (if any).
6597   struct AccessInfo {
6598 
6599     /// The instruction that caused the access.
6600     const Instruction *I;
6601 
6602     /// The base pointer that is accessed, or null if unknown.
6603     const Value *Ptr;
6604 
6605     /// The kind of access (read/write/read+write).
6606     AccessKind Kind;
6607 
6608     bool operator==(const AccessInfo &RHS) const {
6609       return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
6610     }
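    // Strict weak ordering; AccessInfo also serves as the comparator type for
    // the AccessSet defined below.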
6611     bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
6612       if (LHS.I != RHS.I)
6613         return LHS.I < RHS.I;
6614       if (LHS.Ptr != RHS.Ptr)
6615         return LHS.Ptr < RHS.Ptr;
6616       if (LHS.Kind != RHS.Kind)
6617         return LHS.Kind < RHS.Kind;
6618       return false;
6619     }
6620   };
6621 
6622   /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
6623   /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
6624   using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
6625   AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];
6626 
6627   /// Categorize the pointer arguments of CB that might access memory in
6628   /// AccessedLoc and update the state and access map accordingly.
6629   void
6630   categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
6631                                      AAMemoryLocation::StateType &AccessedLocs,
6632                                      bool &Changed);
6633 
6634   /// Return the kind(s) of location that may be accessed by \p V.
6635   AAMemoryLocation::MemoryLocationsKind
6636   categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);
6637 
6638   /// Return the access kind as determined by \p I.
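  /// If \p I is null, READ_WRITE is returned conservatively.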
6639   AccessKind getAccessKindFromInst(const Instruction *I) {
6640     AccessKind AK = READ_WRITE;
6641     if (I) {
6642       AK = I->mayReadFromMemory() ? READ : NONE;
6643       AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
6644     }
6645     return AK;
6646   }
6647 
6648   /// Update the state \p State and the AccessKind2Accesses given that \p I is
6649   /// an access of kind \p AK to a \p MLK memory location with the access
6650   /// pointer \p Ptr.
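  /// As an illustrative (hypothetical) use, recording a store \c Store through
  /// an internal global \c GV could look like:
  ///   updateStateAndAccessesMap(State, NO_GLOBAL_INTERNAL_MEM, &Store, &GV,
  ///                             Changed, getAccessKindFromInst(&Store));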
6651   void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
6652                                  MemoryLocationsKind MLK, const Instruction *I,
6653                                  const Value *Ptr, bool &Changed,
6654                                  AccessKind AK = READ_WRITE) {
6655 
6656     assert(isPowerOf2_32(MLK) && "Expected a single location set!");
6657     auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
6658     if (!Accesses)
6659       Accesses = new (Allocator) AccessSet();
6660     Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
6661     State.removeAssumedBits(MLK);
6662   }
6663 
6664   /// Determine the underlying location kinds for \p Ptr, e.g., globals or
6665   /// arguments, and update the state and access map accordingly.
6666   void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
6667                           AAMemoryLocation::StateType &State, bool &Changed);
6668 
6669   /// Used to allocate access sets.
6670   BumpPtrAllocator &Allocator;
6671 
6672   /// The set of IR attributes AAMemoryLocation deals with.
6673   static const Attribute::AttrKind AttrKinds[4];
6674 };
6675 
6676 const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
6677     Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
6678     Attribute::InaccessibleMemOrArgMemOnly};
6679 
6680 void AAMemoryLocationImpl::categorizePtrValue(
6681     Attributor &A, const Instruction &I, const Value &Ptr,
6682     AAMemoryLocation::StateType &State, bool &Changed) {
6683   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
6684                     << Ptr << " ["
6685                     << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");
6686 
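  // Strip GEP chains down to their base pointer before categorizing, e.g., for
  // the (illustrative) IR `%p = getelementptr inbounds i32, i32* %base, i64 4`
  // the traversal continues with `%base`.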
6687   auto StripGEPCB = [](Value *V) -> Value * {
6688     auto *GEP = dyn_cast<GEPOperator>(V);
6689     while (GEP) {
6690       V = GEP->getPointerOperand();
6691       GEP = dyn_cast<GEPOperator>(V);
6692     }
6693     return V;
6694   };
6695 
6696   auto VisitValueCB = [&](Value &V, const Instruction *,
6697                           AAMemoryLocation::StateType &T,
6698                           bool Stripped) -> bool {
6699     // TODO: recognize the TBAA used for constant accesses.
6700     MemoryLocationsKind MLK = NO_LOCATIONS;
6701     assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
6702     if (isa<UndefValue>(V))
6703       return true;
6704     if (auto *Arg = dyn_cast<Argument>(&V)) {
6705       if (Arg->hasByValAttr())
6706         MLK = NO_LOCAL_MEM;
6707       else
6708         MLK = NO_ARGUMENT_MEM;
6709     } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
6710       // Reading constant memory is not treated as a read "effect" by the
6711       // function attr pass, so we do not treat it as one either. Constant
6712       // memory known via TBAA is similar. (We do not write it; it is constant.)
6713       if (auto *GVar = dyn_cast<GlobalVariable>(GV))
6714         if (GVar->isConstant())
6715           return true;
6716 
6717       if (GV->hasLocalLinkage())
6718         MLK = NO_GLOBAL_INTERNAL_MEM;
6719       else
6720         MLK = NO_GLOBAL_EXTERNAL_MEM;
6721     } else if (isa<ConstantPointerNull>(V) &&
6722                !NullPointerIsDefined(getAssociatedFunction(),
6723                                      V.getType()->getPointerAddressSpace())) {
6724       return true;
6725     } else if (isa<AllocaInst>(V)) {
6726       MLK = NO_LOCAL_MEM;
6727     } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
6728       const auto &NoAliasAA = A.getAAFor<AANoAlias>(
6729           *this, IRPosition::callsite_returned(*CB), DepClassTy::OPTIONAL);
6730       if (NoAliasAA.isAssumedNoAlias())
6731         MLK = NO_MALLOCED_MEM;
6732       else
6733         MLK = NO_UNKOWN_MEM;
6734     } else {
6735       MLK = NO_UNKOWN_MEM;
6736     }
6737 
6738     assert(MLK != NO_LOCATIONS && "No location specified!");
6739     updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
6740                               getAccessKindFromInst(&I));
6741     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value categorized: "
6742                       << V << " -> " << getMemoryLocationsAsStr(T.getAssumed())
6743                       << "\n");
6744     return true;
6745   };
6746 
6747   if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
6748           A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
6749           /* UseValueSimplify */ true,
6750           /* MaxValues */ 32, StripGEPCB)) {
6751     LLVM_DEBUG(
6752         dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
6753     updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
6754                               getAccessKindFromInst(&I));
6755   } else {
6756     LLVM_DEBUG(
6757         dbgs()
6758         << "[AAMemoryLocation] Accessed locations with pointer locations: "
6759         << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
6760   }
6761 }
6762 
6763 void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
6764     Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
6765     bool &Changed) {
6766   for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {
6767 
6768     // Skip non-pointer arguments.
6769     const Value *ArgOp = CB.getArgOperand(ArgNo);
6770     if (!ArgOp->getType()->isPtrOrPtrVectorTy())
6771       continue;
6772 
6773     // Skip readnone arguments.
6774     const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
6775     const auto &ArgOpMemLocationAA =
6776         A.getAAFor<AAMemoryBehavior>(*this, ArgOpIRP, DepClassTy::OPTIONAL);
6777 
6778     if (ArgOpMemLocationAA.isAssumedReadNone())
6779       continue;
6780 
6781     // Categorize potentially accessed pointer arguments as if there was an
6782     // access instruction with them as pointer.
6783     categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
6784   }
6785 }
6786 
6787 AAMemoryLocation::MemoryLocationsKind
6788 AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
6789                                                   bool &Changed) {
6790   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
6791                     << I << "\n");
6792 
6793   AAMemoryLocation::StateType AccessedLocs;
6794   AccessedLocs.intersectAssumedBits(NO_LOCATIONS);
6795 
6796   if (auto *CB = dyn_cast<CallBase>(&I)) {
6797 
6798     // First check if we assume any accessed memory is visible.
6799     const auto &CBMemLocationAA = A.getAAFor<AAMemoryLocation>(
6800         *this, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
6801     LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
6802                       << " [" << CBMemLocationAA << "]\n");
6803 
6804     if (CBMemLocationAA.isAssumedReadNone())
6805       return NO_LOCATIONS;
6806 
6807     if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
6808       updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
6809                                 Changed, getAccessKindFromInst(&I));
6810       return AccessedLocs.getAssumed();
6811     }
6812 
6813     uint32_t CBAssumedNotAccessedLocs =
6814         CBMemLocationAA.getAssumedNotAccessedLocation();
6815 
6816     // Set the argmemonly and global bits as we handle them separately below.
6817     uint32_t CBAssumedNotAccessedLocsNoArgMem =
6818         CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;
6819 
6820     for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
6821       if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
6822         continue;
6823       updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
6824                                 getAccessKindFromInst(&I));
6825     }
6826 
6827     // Now handle global memory if it might be accessed. This is slightly tricky
6828     // as NO_GLOBAL_MEM has multiple bits set.
6829     bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
6830     if (HasGlobalAccesses) {
6831       auto AccessPred = [&](const Instruction *, const Value *Ptr,
6832                             AccessKind Kind, MemoryLocationsKind MLK) {
6833         updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
6834                                   getAccessKindFromInst(&I));
6835         return true;
6836       };
6837       if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
6838               AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
6839         return AccessedLocs.getWorstState();
6840     }
6841 
6842     LLVM_DEBUG(
6843         dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
6844                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6845 
6846     // Now handle argument memory if it might be accessed.
6847     bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
6848     if (HasArgAccesses)
6849       categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);
6850 
6851     LLVM_DEBUG(
6852         dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
6853                << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");
6854 
6855     return AccessedLocs.getAssumed();
6856   }
6857 
6858   if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
6859     LLVM_DEBUG(
6860         dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
6861                << I << " [" << *Ptr << "]\n");
6862     categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
6863     return AccessedLocs.getAssumed();
6864   }
6865 
6866   LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
6867                     << I << "\n");
6868   updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
6869                             getAccessKindFromInst(&I));
6870   return AccessedLocs.getAssumed();
6871 }
6872 
6873 /// An AA to represent the memory location function attributes.
6874 struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
6875   AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
6876       : AAMemoryLocationImpl(IRP, A) {}
6877 
6878   /// See AbstractAttribute::updateImpl(Attributor &A).
6879   ChangeStatus updateImpl(Attributor &A) override {
6880 
6881     const auto &MemBehaviorAA =
6882         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(), DepClassTy::NONE);
6883     if (MemBehaviorAA.isAssumedReadNone()) {
6884       if (MemBehaviorAA.isKnownReadNone())
6885         return indicateOptimisticFixpoint();
6886       assert(isAssumedReadNone() &&
6887              "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
6888       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
6889       return ChangeStatus::UNCHANGED;
6890     }
6891 
6892     // The current assumed state used to determine a change.
6893     auto AssumedState = getAssumed();
6894     bool Changed = false;
6895 
6896     auto CheckRWInst = [&](Instruction &I) {
6897       MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
6898       LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
6899                         << ": " << getMemoryLocationsAsStr(MLK) << "\n");
6900       removeAssumedBits(inverseLocation(MLK, false, false));
6901       // Stop once only the valid bit is set in the *not assumed location*,
6902       // that is, once we no longer exclude any memory locations in the state.
6903       return getAssumedNotAccessedLocation() != VALID_STATE;
6904     };
6905 
6906     if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
6907       return indicatePessimisticFixpoint();
6908 
6909     Changed |= AssumedState != getAssumed();
6910     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6911   }
6912 
6913   /// See AbstractAttribute::trackStatistics()
6914   void trackStatistics() const override {
6915     if (isAssumedReadNone())
6916       STATS_DECLTRACK_FN_ATTR(readnone)
6917     else if (isAssumedArgMemOnly())
6918       STATS_DECLTRACK_FN_ATTR(argmemonly)
6919     else if (isAssumedInaccessibleMemOnly())
6920       STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
6921     else if (isAssumedInaccessibleOrArgMemOnly())
6922       STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
6923   }
6924 };
6925 
6926 /// AAMemoryLocation attribute for call sites.
6927 struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
6928   AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
6929       : AAMemoryLocationImpl(IRP, A) {}
6930 
6931   /// See AbstractAttribute::initialize(...).
6932   void initialize(Attributor &A) override {
6933     AAMemoryLocationImpl::initialize(A);
6934     Function *F = getAssociatedFunction();
6935     if (!F || F->isDeclaration())
6936       indicatePessimisticFixpoint();
6937   }
6938 
6939   /// See AbstractAttribute::updateImpl(...).
6940   ChangeStatus updateImpl(Attributor &A) override {
6941     // TODO: Once we have call site specific value information we can provide
6942     //       call site specific liveness information and then it makes
6943     //       sense to specialize attributes for call site arguments instead of
6944     //       redirecting requests to the callee argument.
6945     Function *F = getAssociatedFunction();
6946     const IRPosition &FnPos = IRPosition::function(*F);
6947     auto &FnAA =
6948         A.getAAFor<AAMemoryLocation>(*this, FnPos, DepClassTy::REQUIRED);
6949     bool Changed = false;
6950     auto AccessPred = [&](const Instruction *I, const Value *Ptr,
6951                           AccessKind Kind, MemoryLocationsKind MLK) {
6952       updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
6953                                 getAccessKindFromInst(I));
6954       return true;
6955     };
6956     if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
6957       return indicatePessimisticFixpoint();
6958     return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
6959   }
6960 
6961   /// See AbstractAttribute::trackStatistics()
6962   void trackStatistics() const override {
6963     if (isAssumedReadNone())
6964       STATS_DECLTRACK_CS_ATTR(readnone)
6965   }
6966 };
6967 
6968 /// ------------------ Value Constant Range Attribute -------------------------
6969 
6970 struct AAValueConstantRangeImpl : AAValueConstantRange {
6971   using StateType = IntegerRangeState;
6972   AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
6973       : AAValueConstantRange(IRP, A) {}
6974 
6975   /// See AbstractAttribute::getAsStr().
6976   const std::string getAsStr() const override {
6977     std::string Str;
6978     llvm::raw_string_ostream OS(Str);
6979     OS << "range(" << getBitWidth() << ")<";
6980     getKnown().print(OS);
6981     OS << " / ";
6982     getAssumed().print(OS);
6983     OS << ">";
6984     return OS.str();
6985   }
6986 
6987   /// Helper function to get a SCEV expr for the associated value at program
6988   /// point \p I.
6989   const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
6990     if (!getAnchorScope())
6991       return nullptr;
6992 
6993     ScalarEvolution *SE =
6994         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
6995             *getAnchorScope());
6996 
6997     LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
6998         *getAnchorScope());
6999 
7000     if (!SE || !LI)
7001       return nullptr;
7002 
7003     const SCEV *S = SE->getSCEV(&getAssociatedValue());
7004     if (!I)
7005       return S;
7006 
7007     return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
7008   }
7009 
7010   /// Helper function to get a range from SCEV for the associated value at
7011   /// program point \p I.
7012   ConstantRange getConstantRangeFromSCEV(Attributor &A,
7013                                          const Instruction *I = nullptr) const {
7014     if (!getAnchorScope())
7015       return getWorstState(getBitWidth());
7016 
7017     ScalarEvolution *SE =
7018         A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
7019             *getAnchorScope());
7020 
7021     const SCEV *S = getSCEV(A, I);
7022     if (!SE || !S)
7023       return getWorstState(getBitWidth());
7024 
7025     return SE->getUnsignedRange(S);
7026   }
7027 
7028   /// Helper function to get a range from LVI for the associated value at
7029   /// program point \p CtxI.
7030   ConstantRange
7031   getConstantRangeFromLVI(Attributor &A,
7032                           const Instruction *CtxI = nullptr) const {
7033     if (!getAnchorScope())
7034       return getWorstState(getBitWidth());
7035 
7036     LazyValueInfo *LVI =
7037         A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
7038             *getAnchorScope());
7039 
7040     if (!LVI || !CtxI)
7041       return getWorstState(getBitWidth());
7042     return LVI->getConstantRange(&getAssociatedValue(),
7043                                  const_cast<Instruction *>(CtxI));
7044   }
7045 
7046   /// See AAValueConstantRange::getKnownConstantRange(..).
7047   ConstantRange
7048   getKnownConstantRange(Attributor &A,
7049                         const Instruction *CtxI = nullptr) const override {
7050     if (!CtxI || CtxI == getCtxI())
7051       return getKnown();
7052 
7053     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7054     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7055     return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
7056   }
7057 
7058   /// See AAValueConstantRange::getAssumedConstantRange(..).
7059   ConstantRange
7060   getAssumedConstantRange(Attributor &A,
7061                           const Instruction *CtxI = nullptr) const override {
7062     // TODO: Make SCEV use Attributor assumption.
7063     //       We may be able to bound a variable range via assumptions in
7064     //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
7065     //       evolve to x^2 + x, then we can say that y is in [2, 12].
7066 
7067     if (!CtxI || CtxI == getCtxI())
7068       return getAssumed();
7069 
7070     ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
7071     ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
7072     return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
7073   }
7074 
7075   /// See AbstractAttribute::initialize(..).
7076   void initialize(Attributor &A) override {
7077     // Intersect a range given by SCEV.
7078     intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));
7079 
7080     // Intersect a range given by LVI.
7081     intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
7082   }
7083 
7084   /// Helper function to create MDNode for range metadata.
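  /// For example, an assumed range of [0, 10) for an i32 value is encoded as
  /// the half-open pair !{i32 0, i32 10} in the produced metadata.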
7085   static MDNode *
7086   getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
7087                             const ConstantRange &AssumedConstantRange) {
7088     Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
7089                                   Ty, AssumedConstantRange.getLower())),
7090                               ConstantAsMetadata::get(ConstantInt::get(
7091                                   Ty, AssumedConstantRange.getUpper()))};
7092     return MDNode::get(Ctx, LowAndHigh);
7093   }
7094 
7095   /// Return true if \p Assumed is a strictly better range than \p KnownRanges.
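  /// For example, an assumed range of [10, 20) is better than existing !range
  /// metadata !{i32 0, i32 100} since it is strictly contained in it, whereas
  /// a full-set assumed range is never considered better.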
7096   static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {
7097 
7098     if (Assumed.isFullSet())
7099       return false;
7100 
7101     if (!KnownRanges)
7102       return true;
7103 
7104     // If multiple ranges are annotated in the IR, we give up annotating the
7105     // assumed range for now.
7106 
7107     // TODO: If there exists a known range which contains the assumed range,
7108     // we can say the assumed range is better.
7109     if (KnownRanges->getNumOperands() > 2)
7110       return false;
7111 
7112     ConstantInt *Lower =
7113         mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
7114     ConstantInt *Upper =
7115         mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));
7116 
7117     ConstantRange Known(Lower->getValue(), Upper->getValue());
7118     return Known.contains(Assumed) && Known != Assumed;
7119   }
7120 
7121   /// Helper function to set range metadata.
7122   static bool
7123   setRangeMetadataIfisBetterRange(Instruction *I,
7124                                   const ConstantRange &AssumedConstantRange) {
7125     auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
7126     if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
7127       if (!AssumedConstantRange.isEmptySet()) {
7128         I->setMetadata(LLVMContext::MD_range,
7129                        getMDNodeForConstantRange(I->getType(), I->getContext(),
7130                                                  AssumedConstantRange));
7131         return true;
7132       }
7133     }
7134     return false;
7135   }
7136 
7137   /// See AbstractAttribute::manifest()
7138   ChangeStatus manifest(Attributor &A) override {
7139     ChangeStatus Changed = ChangeStatus::UNCHANGED;
7140     ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
7141     assert(!AssumedConstantRange.isFullSet() && "Invalid state");
7142 
7143     auto &V = getAssociatedValue();
7144     if (!AssumedConstantRange.isEmptySet() &&
7145         !AssumedConstantRange.isSingleElement()) {
7146       if (Instruction *I = dyn_cast<Instruction>(&V)) {
7147         assert(I == getCtxI() && "Should not annotate an instruction which is "
7148                                  "not the context instruction");
7149         if (isa<CallInst>(I) || isa<LoadInst>(I))
7150           if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
7151             Changed = ChangeStatus::CHANGED;
7152       }
7153     }
7154 
7155     return Changed;
7156   }
7157 };
7158 
7159 struct AAValueConstantRangeArgument final
7160     : AAArgumentFromCallSiteArguments<
7161           AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7162           true /* BridgeCallBaseContext */> {
7163   using Base = AAArgumentFromCallSiteArguments<
7164       AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState,
7165       true /* BridgeCallBaseContext */>;
7166   AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
7167       : Base(IRP, A) {}
7168 
7169   /// See AbstractAttribute::initialize(..).
7170   void initialize(Attributor &A) override {
7171     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7172       indicatePessimisticFixpoint();
7173     } else {
7174       Base::initialize(A);
7175     }
7176   }
7177 
7178   /// See AbstractAttribute::trackStatistics()
7179   void trackStatistics() const override {
7180     STATS_DECLTRACK_ARG_ATTR(value_range)
7181   }
7182 };
7183 
7184 struct AAValueConstantRangeReturned
7185     : AAReturnedFromReturnedValues<AAValueConstantRange,
7186                                    AAValueConstantRangeImpl,
7187                                    AAValueConstantRangeImpl::StateType,
7188                                    /* PropagateCallBaseContext */ true> {
7189   using Base =
7190       AAReturnedFromReturnedValues<AAValueConstantRange,
7191                                    AAValueConstantRangeImpl,
7192                                    AAValueConstantRangeImpl::StateType,
7193                                    /* PropagateCallBaseContext */ true>;
7194   AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
7195       : Base(IRP, A) {}
7196 
7197   /// See AbstractAttribute::initialize(...).
7198   void initialize(Attributor &A) override {}
7199 
7200   /// See AbstractAttribute::trackStatistics()
7201   void trackStatistics() const override {
7202     STATS_DECLTRACK_FNRET_ATTR(value_range)
7203   }
7204 };
7205 
7206 struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
7207   AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
7208       : AAValueConstantRangeImpl(IRP, A) {}
7209 
7210   /// See AbstractAttribute::initialize(...).
7211   void initialize(Attributor &A) override {
7212     AAValueConstantRangeImpl::initialize(A);
7213     Value &V = getAssociatedValue();
7214 
7215     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7216       unionAssumed(ConstantRange(C->getValue()));
7217       indicateOptimisticFixpoint();
7218       return;
7219     }
7220 
7221     if (isa<UndefValue>(&V)) {
7222       // Collapse the undef state to 0.
7223       unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
7224       indicateOptimisticFixpoint();
7225       return;
7226     }
7227 
7228     if (isa<CallBase>(&V))
7229       return;
7230 
7231     if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
7232       return;
7233     // If it is a load instruction with range metadata, use it.
7234     if (LoadInst *LI = dyn_cast<LoadInst>(&V))
7235       if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
7236         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7237         return;
7238       }
7239 
7240     // We can work with PHI and select instructions as we traverse their
7241     // operands during the update.
7242     if (isa<SelectInst>(V) || isa<PHINode>(V))
7243       return;
7244 
7245     // Otherwise we give up.
7246     indicatePessimisticFixpoint();
7247 
7248     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
7249                       << getAssociatedValue() << "\n");
7250   }
7251 
7252   bool calculateBinaryOperator(
7253       Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
7254       const Instruction *CtxI,
7255       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7256     Value *LHS = BinOp->getOperand(0);
7257     Value *RHS = BinOp->getOperand(1);
7258     // TODO: Allow non integers as well.
7259     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7260       return false;
7261 
7262     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7263         *this, IRPosition::value(*LHS, getCallBaseContext()),
7264         DepClassTy::REQUIRED);
7265     QuerriedAAs.push_back(&LHSAA);
7266     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7267 
7268     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7269         *this, IRPosition::value(*RHS, getCallBaseContext()),
7270         DepClassTy::REQUIRED);
7271     QuerriedAAs.push_back(&RHSAA);
7272     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7273 
7274     auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);
7275 
7276     T.unionAssumed(AssumedRange);
7277 
7278     // TODO: Track a known state too.
7279 
7280     return T.isValidState();
7281   }
7282 
7283   bool calculateCastInst(
7284       Attributor &A, CastInst *CastI, IntegerRangeState &T,
7285       const Instruction *CtxI,
7286       SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7287     assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
7288     // TODO: Allow non integers as well.
7289     Value &OpV = *CastI->getOperand(0);
7290     if (!OpV.getType()->isIntegerTy())
7291       return false;
7292 
7293     auto &OpAA = A.getAAFor<AAValueConstantRange>(
7294         *this, IRPosition::value(OpV, getCallBaseContext()),
7295         DepClassTy::REQUIRED);
7296     QuerriedAAs.push_back(&OpAA);
7297     T.unionAssumed(
7298         OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
7299     return T.isValidState();
7300   }
7301 
7302   bool
7303   calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
7304                    const Instruction *CtxI,
7305                    SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
7306     Value *LHS = CmpI->getOperand(0);
7307     Value *RHS = CmpI->getOperand(1);
7308     // TODO: Allow non integers as well.
7309     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7310       return false;
7311 
7312     auto &LHSAA = A.getAAFor<AAValueConstantRange>(
7313         *this, IRPosition::value(*LHS, getCallBaseContext()),
7314         DepClassTy::REQUIRED);
7315     QuerriedAAs.push_back(&LHSAA);
7316     auto &RHSAA = A.getAAFor<AAValueConstantRange>(
7317         *this, IRPosition::value(*RHS, getCallBaseContext()),
7318         DepClassTy::REQUIRED);
    QuerriedAAs.push_back(&RHSAA);
7319     auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
7320     auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);
7321 
7322     // If one of them is an empty set, we can't decide.
7323     if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
7324       return true;
7325 
7326     bool MustTrue = false, MustFalse = false;
7327 
7328     auto AllowedRegion =
7329         ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);
7330 
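    // For example, for `icmp ult %lhs, %rhs` with an RHS range of exactly 8,
    // the allowed region for the LHS is [0, 8); if the LHS range is [10, 20)
    // the intersection is empty and the compare is known to be false.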
7331     if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
7332       MustFalse = true;
7333 
7334     if (LHSAARange.icmp(CmpI->getPredicate(), RHSAARange))
7335       MustTrue = true;
7336 
7337     assert((!MustTrue || !MustFalse) &&
7338            "Either MustTrue or MustFalse should be false!");
7339 
7340     if (MustTrue)
7341       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
7342     else if (MustFalse)
7343       T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
7344     else
7345       T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));
7346 
7347     LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
7348                       << " " << RHSAA << "\n");
7349 
7350     // TODO: Track a known state too.
7351     return T.isValidState();
7352   }
7353 
7354   /// See AbstractAttribute::updateImpl(...).
7355   ChangeStatus updateImpl(Attributor &A) override {
7356     auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
7357                             IntegerRangeState &T, bool Stripped) -> bool {
7358       Instruction *I = dyn_cast<Instruction>(&V);
7359       if (!I || isa<CallBase>(I)) {
7360 
7361         // If the value is not an instruction, we ask the Attributor for the AA.
7362         const auto &AA = A.getAAFor<AAValueConstantRange>(
7363             *this, IRPosition::value(V), DepClassTy::REQUIRED);
7364 
7365         // We avoid the clamp operator so the program point CtxI is utilized.
7366         T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));
7367 
7368         return T.isValidState();
7369       }
7370 
7371       SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
7372       if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
7373         if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
7374           return false;
7375       } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
7376         if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
7377           return false;
7378       } else if (auto *CastI = dyn_cast<CastInst>(I)) {
7379         if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
7380           return false;
7381       } else {
7382         // Give up with other instructions.
7383         // TODO: Add other instructions
7384 
7385         T.indicatePessimisticFixpoint();
7386         return false;
7387       }
7388 
7389       // Catch circular reasoning in a pessimistic way for now.
7390       // TODO: Check how the range evolves and if we stripped anything, see also
7391       //       AADereferenceable or AAAlign for similar situations.
7392       for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
7393         if (QueriedAA != this)
7394           continue;
7395         // If we are in a steady state we do not need to worry.
7396         if (T.getAssumed() == getState().getAssumed())
7397           continue;
7398         T.indicatePessimisticFixpoint();
7399       }
7400 
7401       return T.isValidState();
7402     };
7403 
7404     IntegerRangeState T(getBitWidth());
7405 
7406     if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
7407             A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
7408             /* UseValueSimplify */ false))
7409       return indicatePessimisticFixpoint();
7410 
7411     return clampStateAndIndicateChange(getState(), T);
7412   }
7413 
7414   /// See AbstractAttribute::trackStatistics()
7415   void trackStatistics() const override {
7416     STATS_DECLTRACK_FLOATING_ATTR(value_range)
7417   }
7418 };
7419 
7420 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
7421   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7422       : AAValueConstantRangeImpl(IRP, A) {}
7423 
7424   /// See AbstractAttribute::updateImpl(...).
7425   ChangeStatus updateImpl(Attributor &A) override {
7426     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7427                      "not be called");
7428   }
7429 
7430   /// See AbstractAttribute::trackStatistics()
7431   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7432 };
7433 
7434 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
7435   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7436       : AAValueConstantRangeFunction(IRP, A) {}
7437 
7438   /// See AbstractAttribute::trackStatistics()
7439   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7440 };
7441 
7442 struct AAValueConstantRangeCallSiteReturned
7443     : AACallSiteReturnedFromReturned<AAValueConstantRange,
7444                                      AAValueConstantRangeImpl,
7445                                      AAValueConstantRangeImpl::StateType,
7446                                      /* IntroduceCallBaseContext */ true> {
7447   AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
7448       : AACallSiteReturnedFromReturned<AAValueConstantRange,
7449                                        AAValueConstantRangeImpl,
7450                                        AAValueConstantRangeImpl::StateType,
7451                                        /* IntroduceCallBaseContext */ true>(IRP,
7452                                                                             A) {
7453   }
7454 
7455   /// See AbstractAttribute::initialize(...).
7456   void initialize(Attributor &A) override {
7457     // If it is a call instruction with range metadata, use the metadata.
7458     if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
7459       if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
7460         intersectKnown(getConstantRangeFromMetadata(*RangeMD));
7461 
7462     AAValueConstantRangeImpl::initialize(A);
7463   }
7464 
7465   /// See AbstractAttribute::trackStatistics()
7466   void trackStatistics() const override {
7467     STATS_DECLTRACK_CSRET_ATTR(value_range)
7468   }
7469 };
7470 struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
7471   AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
7472       : AAValueConstantRangeFloating(IRP, A) {}
7473 
7474   /// See AbstractAttribute::manifest()
7475   ChangeStatus manifest(Attributor &A) override {
7476     return ChangeStatus::UNCHANGED;
7477   }
7478 
7479   /// See AbstractAttribute::trackStatistics()
7480   void trackStatistics() const override {
7481     STATS_DECLTRACK_CSARG_ATTR(value_range)
7482   }
7483 };
7484 
7485 /// ------------------ Potential Values Attribute -------------------------
7486 
7487 struct AAPotentialValuesImpl : AAPotentialValues {
7488   using StateType = PotentialConstantIntValuesState;
7489 
7490   AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
7491       : AAPotentialValues(IRP, A) {}
7492 
7493   /// See AbstractAttribute::getAsStr().
7494   const std::string getAsStr() const override {
7495     std::string Str;
7496     llvm::raw_string_ostream OS(Str);
7497     OS << getState();
7498     return OS.str();
7499   }
7500 
7501   /// See AbstractAttribute::updateImpl(...).
7502   ChangeStatus updateImpl(Attributor &A) override {
7503     return indicatePessimisticFixpoint();
7504   }
7505 };
7506 
7507 struct AAPotentialValuesArgument final
7508     : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7509                                       PotentialConstantIntValuesState> {
7510   using Base =
7511       AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
7512                                       PotentialConstantIntValuesState>;
7513   AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
7514       : Base(IRP, A) {}
7515 
7516   /// See AbstractAttribute::initialize(..).
7517   void initialize(Attributor &A) override {
7518     if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
7519       indicatePessimisticFixpoint();
7520     } else {
7521       Base::initialize(A);
7522     }
7523   }
7524 
7525   /// See AbstractAttribute::trackStatistics()
7526   void trackStatistics() const override {
7527     STATS_DECLTRACK_ARG_ATTR(potential_values)
7528   }
7529 };
7530 
7531 struct AAPotentialValuesReturned
7532     : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
7533   using Base =
7534       AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
7535   AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
7536       : Base(IRP, A) {}
7537 
7538   /// See AbstractAttribute::trackStatistics()
7539   void trackStatistics() const override {
7540     STATS_DECLTRACK_FNRET_ATTR(potential_values)
7541   }
7542 };
7543 
7544 struct AAPotentialValuesFloating : AAPotentialValuesImpl {
7545   AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
7546       : AAPotentialValuesImpl(IRP, A) {}
7547 
7548   /// See AbstractAttribute::initialize(..).
7549   void initialize(Attributor &A) override {
7550     Value &V = getAssociatedValue();
7551 
7552     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7553       unionAssumed(C->getValue());
7554       indicateOptimisticFixpoint();
7555       return;
7556     }
7557 
7558     if (isa<UndefValue>(&V)) {
7559       unionAssumedWithUndef();
7560       indicateOptimisticFixpoint();
7561       return;
7562     }
7563 
7564     if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
7565       return;
7566 
7567     if (isa<SelectInst>(V) || isa<PHINode>(V))
7568       return;
7569 
7570     indicatePessimisticFixpoint();
7571 
7572     LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
7573                       << getAssociatedValue() << "\n");
7574   }
7575 
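  /// Evaluate the predicate of \p ICI on the constant operands \p LHS and
  /// \p RHS, e.g., `icmp slt` with LHS = -1 and RHS = 1 is true, while
  /// `icmp ult` on the same bit patterns is false (-1 is the maximal unsigned
  /// value).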
7576   static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
7577                                 const APInt &RHS) {
7578     ICmpInst::Predicate Pred = ICI->getPredicate();
7579     switch (Pred) {
7580     case ICmpInst::ICMP_UGT:
7581       return LHS.ugt(RHS);
7582     case ICmpInst::ICMP_SGT:
7583       return LHS.sgt(RHS);
7584     case ICmpInst::ICMP_EQ:
7585       return LHS.eq(RHS);
7586     case ICmpInst::ICMP_UGE:
7587       return LHS.uge(RHS);
7588     case ICmpInst::ICMP_SGE:
7589       return LHS.sge(RHS);
7590     case ICmpInst::ICMP_ULT:
7591       return LHS.ult(RHS);
7592     case ICmpInst::ICMP_SLT:
7593       return LHS.slt(RHS);
7594     case ICmpInst::ICMP_NE:
7595       return LHS.ne(RHS);
7596     case ICmpInst::ICMP_ULE:
7597       return LHS.ule(RHS);
7598     case ICmpInst::ICMP_SLE:
7599       return LHS.sle(RHS);
7600     default:
7601       llvm_unreachable("Invalid ICmp predicate!");
7602     }
7603   }
7604 
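  /// Apply the integer cast of \p CI to the constant \p Src, e.g., truncating
  /// the 32-bit value 300 to i8 yields 44, while sign-extending the i8 value
  /// -1 to i32 yields -1 again (all bits set).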
7605   static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
7606                                  uint32_t ResultBitWidth) {
7607     Instruction::CastOps CastOp = CI->getOpcode();
7608     switch (CastOp) {
7609     default:
7610       llvm_unreachable("unsupported or not integer cast");
7611     case Instruction::Trunc:
7612       return Src.trunc(ResultBitWidth);
7613     case Instruction::SExt:
7614       return Src.sext(ResultBitWidth);
7615     case Instruction::ZExt:
7616       return Src.zext(ResultBitWidth);
7617     case Instruction::BitCast:
7618       return Src;
7619     }
7620   }
7621 
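  /// Fold the binary operator \p BinOp on the constants \p LHS and \p RHS,
  /// e.g., `shl` with LHS = 1 and RHS = 3 yields 8, while divisions and
  /// remainders with a zero RHS set \p SkipOperation instead of folding, as
  /// the result would be undefined.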
7622   static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
7623                                        const APInt &LHS, const APInt &RHS,
7624                                        bool &SkipOperation, bool &Unsupported) {
7625     Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
7626     // Unsupported is set to true when the binary operator is not supported.
7627     // SkipOperation is set to true when UB occurs with the given operand pair
7628     // (LHS, RHS).
7629     // TODO: we should look at nsw and nuw keywords to handle operations
7630     //       that create poison or undef value.
7631     switch (BinOpcode) {
7632     default:
7633       Unsupported = true;
7634       return LHS;
7635     case Instruction::Add:
7636       return LHS + RHS;
7637     case Instruction::Sub:
7638       return LHS - RHS;
7639     case Instruction::Mul:
7640       return LHS * RHS;
7641     case Instruction::UDiv:
7642       if (RHS.isNullValue()) {
7643         SkipOperation = true;
7644         return LHS;
7645       }
7646       return LHS.udiv(RHS);
7647     case Instruction::SDiv:
7648       if (RHS.isNullValue()) {
7649         SkipOperation = true;
7650         return LHS;
7651       }
7652       return LHS.sdiv(RHS);
7653     case Instruction::URem:
7654       if (RHS.isNullValue()) {
7655         SkipOperation = true;
7656         return LHS;
7657       }
7658       return LHS.urem(RHS);
7659     case Instruction::SRem:
7660       if (RHS.isNullValue()) {
7661         SkipOperation = true;
7662         return LHS;
7663       }
7664       return LHS.srem(RHS);
7665     case Instruction::Shl:
7666       return LHS.shl(RHS);
7667     case Instruction::LShr:
7668       return LHS.lshr(RHS);
7669     case Instruction::AShr:
7670       return LHS.ashr(RHS);
7671     case Instruction::And:
7672       return LHS & RHS;
7673     case Instruction::Or:
7674       return LHS | RHS;
7675     case Instruction::Xor:
7676       return LHS ^ RHS;
7677     }
7678   }
7679 
7680   bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
7681                                            const APInt &LHS, const APInt &RHS) {
7682     bool SkipOperation = false;
7683     bool Unsupported = false;
7684     APInt Result =
7685         calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
7686     if (Unsupported)
7687       return false;
7688     // If SkipOperation is true, we can ignore this operand pair (L, R).
7689     if (!SkipOperation)
7690       unionAssumed(Result);
7691     return isValidState();
7692   }
7693 
7694   ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
7695     auto AssumedBefore = getAssumed();
7696     Value *LHS = ICI->getOperand(0);
7697     Value *RHS = ICI->getOperand(1);
7698     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7699       return indicatePessimisticFixpoint();
7700 
7701     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7702                                                 DepClassTy::REQUIRED);
7703     if (!LHSAA.isValidState())
7704       return indicatePessimisticFixpoint();
7705 
7706     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7707                                                 DepClassTy::REQUIRED);
7708     if (!RHSAA.isValidState())
7709       return indicatePessimisticFixpoint();
7710 
7711     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7712     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7713 
7714     // TODO: make use of undef flag to limit potential values aggressively.
7715     bool MaybeTrue = false, MaybeFalse = false;
7716     const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
7717     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7718       // The result of any comparison between undefs can be soundly replaced
7719       // with undef.
7720       unionAssumedWithUndef();
7721     } else if (LHSAA.undefIsContained()) {
7723       for (const APInt &R : RHSAAPVS) {
7724         bool CmpResult = calculateICmpInst(ICI, Zero, R);
7725         MaybeTrue |= CmpResult;
7726         MaybeFalse |= !CmpResult;
7727         if (MaybeTrue & MaybeFalse)
7728           return indicatePessimisticFixpoint();
7729       }
7730     } else if (RHSAA.undefIsContained()) {
7731       for (const APInt &L : LHSAAPVS) {
7732         bool CmpResult = calculateICmpInst(ICI, L, Zero);
7733         MaybeTrue |= CmpResult;
7734         MaybeFalse |= !CmpResult;
7735         if (MaybeTrue & MaybeFalse)
7736           return indicatePessimisticFixpoint();
7737       }
7738     } else {
7739       for (const APInt &L : LHSAAPVS) {
7740         for (const APInt &R : RHSAAPVS) {
7741           bool CmpResult = calculateICmpInst(ICI, L, R);
7742           MaybeTrue |= CmpResult;
7743           MaybeFalse |= !CmpResult;
7744           if (MaybeTrue & MaybeFalse)
7745             return indicatePessimisticFixpoint();
7746         }
7747       }
7748     }
7749     if (MaybeTrue)
7750       unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
7751     if (MaybeFalse)
7752       unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
7753     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7754                                          : ChangeStatus::CHANGED;
7755   }
7756 
7757   ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
7758     auto AssumedBefore = getAssumed();
7759     Value *LHS = SI->getTrueValue();
7760     Value *RHS = SI->getFalseValue();
7761     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7762       return indicatePessimisticFixpoint();
7763 
7764     // TODO: Use assumed simplified condition value
7765     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7766                                                 DepClassTy::REQUIRED);
7767     if (!LHSAA.isValidState())
7768       return indicatePessimisticFixpoint();
7769 
7770     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7771                                                 DepClassTy::REQUIRED);
7772     if (!RHSAA.isValidState())
7773       return indicatePessimisticFixpoint();
7774 
7775     if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
7776       // select i1 *, undef, undef => undef
7777       unionAssumedWithUndef();
7778     else {
7779       unionAssumed(LHSAA);
7780       unionAssumed(RHSAA);
7781     }
7782     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7783                                          : ChangeStatus::CHANGED;
7784   }
7785 
7786   ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
7787     auto AssumedBefore = getAssumed();
7788     if (!CI->isIntegerCast())
7789       return indicatePessimisticFixpoint();
7790     assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
7791     uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
7792     Value *Src = CI->getOperand(0);
7793     auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src),
7794                                                 DepClassTy::REQUIRED);
7795     if (!SrcAA.isValidState())
7796       return indicatePessimisticFixpoint();
7797     const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
7798     if (SrcAA.undefIsContained())
7799       unionAssumedWithUndef();
7800     else {
7801       for (const APInt &S : SrcAAPVS) {
7802         APInt T = calculateCastInst(CI, S, ResultBitWidth);
7803         unionAssumed(T);
7804       }
7805     }
7806     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7807                                          : ChangeStatus::CHANGED;
7808   }
7809 
7810   ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
7811     auto AssumedBefore = getAssumed();
7812     Value *LHS = BinOp->getOperand(0);
7813     Value *RHS = BinOp->getOperand(1);
7814     if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
7815       return indicatePessimisticFixpoint();
7816 
7817     auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS),
7818                                                 DepClassTy::REQUIRED);
7819     if (!LHSAA.isValidState())
7820       return indicatePessimisticFixpoint();
7821 
7822     auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS),
7823                                                 DepClassTy::REQUIRED);
7824     if (!RHSAA.isValidState())
7825       return indicatePessimisticFixpoint();
7826 
7827     const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
7828     const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
7829     const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);
7830 
7831     // TODO: make use of undef flag to limit potential values aggressively.
7832     if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
7833       if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
7834         return indicatePessimisticFixpoint();
7835     } else if (LHSAA.undefIsContained()) {
7836       for (const APInt &R : RHSAAPVS) {
7837         if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
7838           return indicatePessimisticFixpoint();
7839       }
7840     } else if (RHSAA.undefIsContained()) {
7841       for (const APInt &L : LHSAAPVS) {
7842         if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
7843           return indicatePessimisticFixpoint();
7844       }
7845     } else {
7846       for (const APInt &L : LHSAAPVS) {
7847         for (const APInt &R : RHSAAPVS) {
7848           if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
7849             return indicatePessimisticFixpoint();
7850         }
7851       }
7852     }
7853     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7854                                          : ChangeStatus::CHANGED;
7855   }
7856 
7857   ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
7858     auto AssumedBefore = getAssumed();
7859     for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
7860       Value *IncomingValue = PHI->getIncomingValue(u);
7861       auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
7862           *this, IRPosition::value(*IncomingValue), DepClassTy::REQUIRED);
7863       if (!PotentialValuesAA.isValidState())
7864         return indicatePessimisticFixpoint();
7865       if (PotentialValuesAA.undefIsContained())
7866         unionAssumedWithUndef();
7867       else
7868         unionAssumed(PotentialValuesAA.getAssumed());
7869     }
7870     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7871                                          : ChangeStatus::CHANGED;
7872   }
7873 
7874   /// See AbstractAttribute::updateImpl(...).
7875   ChangeStatus updateImpl(Attributor &A) override {
7876     Value &V = getAssociatedValue();
7877     Instruction *I = dyn_cast<Instruction>(&V);
7878 
7879     if (auto *ICI = dyn_cast<ICmpInst>(I))
7880       return updateWithICmpInst(A, ICI);
7881 
7882     if (auto *SI = dyn_cast<SelectInst>(I))
7883       return updateWithSelectInst(A, SI);
7884 
7885     if (auto *CI = dyn_cast<CastInst>(I))
7886       return updateWithCastInst(A, CI);
7887 
7888     if (auto *BinOp = dyn_cast<BinaryOperator>(I))
7889       return updateWithBinaryOperator(A, BinOp);
7890 
7891     if (auto *PHI = dyn_cast<PHINode>(I))
7892       return updateWithPHINode(A, PHI);
7893 
7894     return indicatePessimisticFixpoint();
7895   }
7896 
7897   /// See AbstractAttribute::trackStatistics()
7898   void trackStatistics() const override {
7899     STATS_DECLTRACK_FLOATING_ATTR(potential_values)
7900   }
7901 };
7902 
7903 struct AAPotentialValuesFunction : AAPotentialValuesImpl {
7904   AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
7905       : AAPotentialValuesImpl(IRP, A) {}
7906 
7907   /// See AbstractAttribute::updateImpl(...).
7908   ChangeStatus updateImpl(Attributor &A) override {
7909     llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
7910                      "not be called");
7911   }
7912 
7913   /// See AbstractAttribute::trackStatistics()
7914   void trackStatistics() const override {
7915     STATS_DECLTRACK_FN_ATTR(potential_values)
7916   }
7917 };
7918 
7919 struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
7920   AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
7921       : AAPotentialValuesFunction(IRP, A) {}
7922 
7923   /// See AbstractAttribute::trackStatistics()
7924   void trackStatistics() const override {
7925     STATS_DECLTRACK_CS_ATTR(potential_values)
7926   }
7927 };
7928 
7929 struct AAPotentialValuesCallSiteReturned
7930     : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
7931   AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
7932       : AACallSiteReturnedFromReturned<AAPotentialValues,
7933                                        AAPotentialValuesImpl>(IRP, A) {}
7934 
7935   /// See AbstractAttribute::trackStatistics()
7936   void trackStatistics() const override {
7937     STATS_DECLTRACK_CSRET_ATTR(potential_values)
7938   }
7939 };
7940 
7941 struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
7942   AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
7943       : AAPotentialValuesFloating(IRP, A) {}
7944 
7945   /// See AbstractAttribute::initialize(..).
7946   void initialize(Attributor &A) override {
7947     Value &V = getAssociatedValue();
7948 
7949     if (auto *C = dyn_cast<ConstantInt>(&V)) {
7950       unionAssumed(C->getValue());
7951       indicateOptimisticFixpoint();
7952       return;
7953     }
7954 
7955     if (isa<UndefValue>(&V)) {
7956       unionAssumedWithUndef();
7957       indicateOptimisticFixpoint();
7958       return;
7959     }
7960   }
7961 
7962   /// See AbstractAttribute::updateImpl(...).
7963   ChangeStatus updateImpl(Attributor &A) override {
7964     Value &V = getAssociatedValue();
7965     auto AssumedBefore = getAssumed();
7966     auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V),
7967                                              DepClassTy::REQUIRED);
7968     const auto &S = AA.getAssumed();
7969     unionAssumed(S);
7970     return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
7971                                          : ChangeStatus::CHANGED;
7972   }
7973 
7974   /// See AbstractAttribute::trackStatistics()
7975   void trackStatistics() const override {
7976     STATS_DECLTRACK_CSARG_ATTR(potential_values)
7977   }
7978 };

/// ------------------------ NoUndef Attribute ---------------------------------
struct AANoUndefImpl : AANoUndef {
  AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getIRPosition().hasAttr({Attribute::NoUndef})) {
      indicateOptimisticFixpoint();
      return;
    }
    Value &V = getAssociatedValue();
    if (isa<UndefValue>(V))
      indicatePessimisticFixpoint();
    else if (isa<FreezeInst>(V))
      indicateOptimisticFixpoint();
    else if (getPositionKind() != IRPosition::IRP_RETURNED &&
             isGuaranteedNotToBeUndefOrPoison(&V))
      indicateOptimisticFixpoint();
    else
      AANoUndef::initialize(A);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANoUndef::StateType &State) {
    const Value *UseV = U->get();
    const DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (Function *F = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
    }
    State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
    bool TrackUse = false;
    // Track use for instructions which must produce undef or poison bits when
    // at least one operand contains such bits.
    if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
      TrackUse = true;
    return TrackUse;
  }
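
  // Illustrative example (assumed IR, for exposition only): if the associated
  // value %p is used by
  //
  //   %q = getelementptr i8, i8* %p, i64 4
  //
  // the use is tracked above because a getelementptr, like a cast, produces
  // undef or poison whenever one of its operands does, so uses of %q are worth
  // following as well when reasoning about %p.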

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noundef" : "may-undef-or-poison";
  }

  ChangeStatus manifest(Attributor &A) override {
    // We don't manifest the noundef attribute for dead positions because the
    // values associated with dead positions would be replaced with undef
    // values.
    if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
      return ChangeStatus::UNCHANGED;
    // A position whose simplified value does not have any value is
    // considered to be dead. We don't manifest noundef in such positions for
    // the same reason as above.
    auto &ValueSimplifyAA =
        A.getAAFor<AAValueSimplify>(*this, getIRPosition(), DepClassTy::NONE);
    if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
      return ChangeStatus::UNCHANGED;
    return AANoUndef::manifest(A);
  }
};

struct AANoUndefFloating : public AANoUndefImpl {
  AANoUndefFloating(const IRPosition &IRP, Attributor &A)
      : AANoUndefImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUndefImpl::initialize(A);
    if (!getState().isAtFixpoint())
      if (Instruction *CtxI = getCtxI())
        followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANoUndef::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V),
                                             DepClassTy::REQUIRED);
      if (!Stripped && this == &AA) {
        T.indicatePessimisticFixpoint();
      } else {
        const AANoUndef::StateType &S =
            static_cast<const AANoUndef::StateType &>(AA.getState());
        T ^= S;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AANoUndef, StateType>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};
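
// Rough sketch of the floating update above: the generic value traversal
// visits the values a floating position may stand for (e.g. the incoming
// values of a phi or the operands of a select), and VisitValueCB meets the
// AANoUndef state of each visited value into T. If the traversal only reaches
// the position itself again without having stripped anything, no new
// information is available and the state is fixed pessimistically.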

struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};

struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};

struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};
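
// Illustrative result of a successful noundef deduction (assumed IR, for
// exposition only): manifesting the argument and call-site-returned positions
// would turn
//
//   define i32 @f(i32 %x)           and   %r = call i32 @f(i32 %v)
//
// into
//
//   define i32 @f(i32 noundef %x)   and   %r = call noundef i32 @f(i32 %v)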
} // namespace

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;
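
// For illustration, SWITCH_PK_CREATE(AANoUnwind, IRP, IRP_FUNCTION, Function)
// expands (modulo formatting) to:
//
//   case IRPosition::IRP_FUNCTION:
//     AA = new (A.Allocator) AANoUnwindFunction(IRP, A);
//     ++NumAAs;
//     break;
//
// while SWITCH_PK_INV emits an llvm_unreachable for position kinds the
// attribute cannot be created for.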

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }
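
// As a concrete example, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(
// AANoUnwind) further below defines AANoUnwind::createForPosition: it hits
// llvm_unreachable for value positions and otherwise allocates an
// AANoUnwindFunction or AANoUnwindCallSite from the Attributor's allocator.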

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)
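
// These generator functions are what the Attributor framework invokes when an
// abstract attribute is first requested for a position, roughly along the
// lines of the following sketch (see Attributor.h for the exact interface):
//
//   const auto &NoUnwindAA =
//       A.getOrCreateAAFor<AANoUnwind>(IRPosition::function(F));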

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV